Dataset schema:

| column | type | values |
|---|---|---|
| code | string | 87 to 55.2k chars |
| code_codestyle | int64 | 0 to 349 |
| style_context | string | 135 to 49.1k chars |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
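A minimal sketch of loading rows with this schema for inspection; the Parquet file name is a hypothetical placeholder, and the comment on `label` is an assumption inferred from the column names, not documented semantics:

```python
# Hypothetical loader for rows with the schema above; "train.parquet" is a placeholder.
from datasets import load_dataset

ds = load_dataset("parquet", data_files={"train": "train.parquet"})["train"]
row = ds[0]
print(row["code"][:200])           # a code sample
print(row["code_codestyle"])       # style-cluster id (0 to 349)
print(row["style_context"][:200])  # a reference snippet
print(row["label"])                # binary; presumably whether code and context share a style
```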
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
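The script reads a CSV with `model`, `batch_size`, `sequence_length`, and `result` columns (the exact column names consumed in `Plot.__init__`). A minimal sketch of producing a compatible file; the model name and numbers are illustrative, and saving the script as `plot_csv_file.py` is an assumption:

```python
# Write a toy CSV in the layout Plot.__init__ expects; all values are made up.
import csv

rows = [
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 128, "result": 1277},
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 512, "result": 1765},
]
with open("benchmark.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)
# Then, assuming the script above is saved as plot_csv_file.py:
#   python plot_csv_file.py --csv_file benchmark.csv --figure_png_file plot.png
```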
---
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False,
        initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
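A small usage sketch of the derived attributes, assuming the class above is importable: with the default `embed_dim=96` and four stages, the channel dimension after the last stage comes out to `96 * 2**3 = 768`.

```python
# Sketch: derived attributes computed in MaskFormerSwinConfig.__init__ (defaults assumed).
config = MaskFormerSwinConfig()
assert config.hidden_size == 768  # int(96 * 2 ** (4 - 1))
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
```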
---
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = StableDiffusionDiffEditPipeline
a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
a_ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ = frozenset([])
def A ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : str , _A : List[str] , _A : Any=0 ) -> str:
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Any = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : int = Image.fromarray(np.uint8(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Dict = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any:
UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uint8(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : List[str] ) -> Optional[Any]:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Any = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase_ : str = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0]
UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max()
self.assertLess(_A , 1e-4 )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : Optional[Any] = '''cpu'''
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
UpperCAmelCase_ : int = pipe.generate_mask(**_A )
UpperCAmelCase_ : Tuple = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase_ : List[Any] = np.array([0] * 9 )
UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A ( self : str ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = '''cpu'''
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : int = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
def A ( self : Tuple ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : Any = '''cpu'''
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A )
UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : Optional[Any] ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A ( cls : Dict ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
UpperCAmelCase_ : Any = raw_image
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=None , torch_dtype=torch.float16 )
UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Tuple = '''a bowl of pears'''
UpperCAmelCase_ : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[str] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
UpperCAmelCase_ : Any = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=None , torch_dtype=torch.float16 )
UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Dict = '''a bowl of pears'''
UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[Any] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
UpperCAmelCase_ : Dict = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : Tuple = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
---
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused timm qkv matrix into separate query/key/value tensors
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
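For orientation, `get_swin_config` derives every hyperparameter from the timm checkpoint name alone; a quick sketch of the parsing for the default `--swin_name` (pure string handling, nothing downloaded):

```python
# How get_swin_config parses a timm checkpoint name (no model download needed).
name_split = "swin_tiny_patch4_window7_224".split("_")
model_size = name_split[1]            # "tiny" -> embed_dim 96, depths (2, 2, 6, 2)
img_size = int(name_split[4])         # 224
window_size = int(name_split[3][-1])  # 7, the last character of "window7"
print(model_size, img_size, window_size)
```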
---
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ ( UpperCamelCase):
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase_ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_A , '''num_heads''' ) )
class snake_case__ :
def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]:
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Tuple = patch_sizes
UpperCAmelCase_ : int = patch_stride
UpperCAmelCase_ : Any = patch_padding
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Tuple = stride_kv
UpperCAmelCase_ : Optional[Any] = depth
UpperCAmelCase_ : Dict = cls_token
UpperCAmelCase_ : Dict = attention_drop_rate
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ) -> int:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
def A ( self : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : str = CvtForImageClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : int = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
a_ = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : Optional[int] = CvtModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def A ( self : Any ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : int ) -> List[str]:
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def A ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def A ( self : Any ) -> Optional[Any]:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def A ( self : List[Any] ) -> Any:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : Dict ) -> List[str]:
def check_hidden_states_output(_A : Dict , _A : str , _A : int ):
UpperCAmelCase_ : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
UpperCAmelCase_ : Any = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def A ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def A ( self : Optional[int] ) -> int:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def __UpperCAmelCase ( ) -> str:
UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase):
@cached_property
def A ( self : Union[str, Any] ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A ( self : str ) -> str:
UpperCAmelCase_ : str = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_A )
# verify the logits
UpperCAmelCase_ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
---
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
---
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
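A minimal usage sketch, assuming the template class above is in scope: `column_mapping` is what consumers use to rename the configured text column to the canonical `"text"` name.

```python
# Sketch: the task template maps a custom text column to the canonical "text" name.
template = LanguageModeling(text_column="content")
assert template.column_mapping == {"content": "text"}
```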
---
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sorts a list of numbers in place using exchange sort."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
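Exchange sort compares each element against every later element and swaps out-of-order pairs, so it always performs n*(n-1)/2 comparisons (O(n^2)) regardless of input order. A quick illustrative call:

```python
assert exchange_sort([5, 1, 4, 2]) == [1, 2, 4, 5]
assert exchange_sort([]) == []  # empty input is a no-op
```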
---
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict:
with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f:
UpperCAmelCase_ : Union[str, Any] = json.load(A )
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = []
for key, info in class_info.items():
UpperCAmelCase_ : Tuple = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(A ) )
UpperCAmelCase_ : Any = thing_ids
UpperCAmelCase_ : Union[str, Any] = class_names
return metadata
class snake_case__ ( unittest.TestCase):
def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Optional[int] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
UpperCAmelCase_ : str = class_info_file
UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
UpperCAmelCase_ : Tuple = num_text
UpperCAmelCase_ : Union[str, Any] = repo_path
# for the post_process_functions
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Dict = 10
UpperCAmelCase_ : int = 10
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
UpperCAmelCase_ : str = ignore_index
def A ( self : Dict ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
def A ( self : Tuple ) -> str:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ = image_processing_class
def A ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )
@property
def A ( self : Any ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )
        return inputs
def A ( self : int ) -> str:
pass
def A ( self : Tuple ) -> Union[str, Any]:
def common(_A : Optional[int]=False , _A : str=None ):
UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )
UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
UpperCAmelCase_ : int = inputs['''pixel_values''']
UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_A )
common(is_instance_map=_A , segmentation_type='''pil''' )
common(is_instance_map=_A , segmentation_type='''pil''' )
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        # chosen so the flattened mask has two runs: 45 ones starting at pixel 21, then 10 more
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
def A ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
---
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader

from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            # fill a character buffer large enough to yield num_of_sequences packed sequences
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
---
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __UpperCAmelCase ( A : Optional[int] ) -> int:
UpperCAmelCase_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A )
return [m.group(0 ) for m in matches]
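# A minimal, self-contained sketch (editor addition) of how the split above behaves;
# the `_demo_camel_case_split` name is hypothetical and not part of this script.
# Each match ends at a lower->upper boundary, at the end of an acronym
# (an uppercase letter followed by uppercase+lowercase), or at the end of the string.
def _demo_camel_case_split() -> None:
    pieces = [
        m.group(0)
        for m in re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", "TFBertForQuestionAnswering")
    ]
    assert pieces == ["TF", "Bert", "For", "Question", "Answering"]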
def __UpperCAmelCase ( ) -> str:
UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase_ : Optional[Any] = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
UpperCAmelCase_ : Dict = collections.defaultdict(A )
UpperCAmelCase_ : str = collections.defaultdict(A )
UpperCAmelCase_ : int = collections.defaultdict(A )
# Let's look through all transformers objects (once) and find whether models are supported by a given backend.
for attr_name in dir(A ):
UpperCAmelCase_ : int = None
if _re_tf_models.match(A ) is not None:
UpperCAmelCase_ : Optional[Any] = tf_models
UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0]
elif _re_flax_models.match(A ) is not None:
UpperCAmelCase_ : int = flax_models
UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0]
elif _re_pt_models.match(A ) is not None:
UpperCAmelCase_ : Union[str, Any] = pt_models
UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0]
if lookup_dict is not None:
while len(A ) > 0:
if attr_name in model_prefix_to_model_type:
UpperCAmelCase_ : Optional[int] = True
break
# Try again after removing the last word in the name
UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] )
UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
UpperCAmelCase_ : List[Any] = list(A )
all_models.sort()
UpperCAmelCase_ : Dict = {'''model_type''': all_models}
UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models]
UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models]
UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure each model type is assigned its preferred processing class.
UpperCAmelCase_ : int = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
UpperCAmelCase_ : Any = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
UpperCAmelCase_ : int = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
UpperCAmelCase_ : Dict = '''AutoTokenizer'''
UpperCAmelCase_ : str = [processors[t] for t in all_models]
return pd.DataFrame(A )
def __UpperCAmelCase ( A : Optional[int] ) -> str:
UpperCAmelCase_ : int = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(A , A , A ):
# The type of pipeline may not exist in this framework
if not hasattr(A , A ):
continue
# First extract all model_names
UpperCAmelCase_ : List[str] = []
for name in getattr(A , A ).values():
if isinstance(A , A ):
model_names.append(A )
else:
model_names.extend(list(A ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
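# Editor note: `table` ends up mapping each concrete model class to its
# (pipeline tag, auto class) pair, e.g. "BertForMaskedLM" -> ("fill-mask", "AutoModelForMaskedLM").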
return table
def __UpperCAmelCase ( A : int , A : Any ) -> Tuple:
UpperCAmelCase_ : Tuple = get_frameworks_table()
UpperCAmelCase_ : Any = Dataset.from_pandas(A )
UpperCAmelCase_ : str = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A )
UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A )
UpperCAmelCase_ : Optional[int] = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(A ) )
}
UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A )
# Sort the model classes so that nondeterministic ordering does not create spurious update commits.
UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() )
UpperCAmelCase_ : Optional[Any] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
UpperCAmelCase_ : Dict = Dataset.from_pandas(A )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) )
if commit_sha is not None:
UpperCAmelCase_ : List[str] = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
UpperCAmelCase_ : int = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , )
def __UpperCAmelCase ( ) -> int:
UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
UpperCAmelCase_ : List[str] = []
for key in pipeline_tasks:
if key not in in_table:
UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt''']
if isinstance(A , (list, tuple) ):
UpperCAmelCase_ : Dict = model[0]
UpperCAmelCase_ : Any = model.__name__
if model not in in_table.values():
missing.append(A )
if len(A ) > 0:
UpperCAmelCase_ : List[Any] = ''', '''.join(A )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 304
| 0
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class SCREAMING_SNAKE_CASE__ :
def __init__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=0.2,__lowerCamelCase=0.2 ):
A__ = bp_numa
A__ = bp_numa
A__ = bp_numa
A__ = conva_get[:2]
A__ = conva_get[2]
A__ = size_pa
A__ = rate_w
A__ = rate_t
A__ = [
np.mat(-1 * np.random.rand(self.conva[0],self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
A__ = np.mat(-1 * np.random.rand(self.num_bpa,self.num_bpa ) + 0.5 )
A__ = np.mat(-1 * np.random.rand(self.num_bpa,self.num_bpa ) + 0.5 )
A__ = -2 * np.random.rand(self.conva[1] ) + 1
A__ = -2 * np.random.rand(self.num_bpa ) + 1
A__ = -2 * np.random.rand(self.num_bpa ) + 1
def UpperCamelCase ( self,__lowerCamelCase ):
# save model dict with pickle
A__ = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_A,'''wb''' ) as f:
pickle.dump(_A,_A )
print(f"Model saved: {save_path}" )
@classmethod
def UpperCamelCase ( cls,__lowerCamelCase ):
# read saved model
with open(_A,'''rb''' ) as f:
A__ = pickle.load(_A ) # noqa: S301
A__ = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
A__ = model_dic.get('''size_pooling1''' )
A__ = model_dic.get('''num_bp1''' )
A__ = model_dic.get('''num_bp2''' )
A__ = model_dic.get('''num_bp3''' )
A__ = model_dic.get('''rate_weight''' )
A__ = model_dic.get('''rate_thre''' )
# create model instance
A__ = CNN(_A,_A,_A,_A,_A,_A,_A )
# modify model parameter
A__ = model_dic.get('''w_conv1''' )
A__ = model_dic.get('''wkj''' )
A__ = model_dic.get('''vji''' )
A__ = model_dic.get('''thre_conv1''' )
A__ = model_dic.get('''thre_bp2''' )
A__ = model_dic.get('''thre_bp3''' )
return conv_ins
def UpperCamelCase ( self,__lowerCamelCase ):
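# logistic sigmoid activation: maps any real-valued input into (0, 1)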
return 1 / (1 + np.exp(-1 * x ))
def UpperCamelCase ( self,__lowerCamelCase ):
return round(_A,3 )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
# convolution process
A__ = convs[0]
A__ = convs[1]
A__ = np.shape(_A )[0]
# get the data slice of original image data, data_focus
A__ = []
for i_focus in range(0,size_data - size_conv + 1,_A ):
for j_focus in range(0,size_data - size_conv + 1,_A ):
A__ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_A )
# calculate the feature map of every single kernel, and saved as list of matrix
A__ = []
A__ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_A ):
A__ = []
for i_focus in range(len(_A ) ):
A__ = (
np.sum(np.multiply(data_focus[i_focus],w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_A ) )
A__ = np.asmatrix(_A ).reshape(
_A,_A )
data_featuremap.append(_A )
# expand the data slices to one dimension
A__ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_A ) )
A__ = np.asarray(_A )
return focus_list, data_featuremap
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase="average_pool" ):
# pooling process
A__ = len(featuremaps[0] )
A__ = int(size_map / size_pooling )
A__ = []
for i_map in range(len(_A ) ):
A__ = featuremaps[i_map]
A__ = []
for i_focus in range(0,_A,_A ):
for j_focus in range(0,_A,_A ):
A__ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_A ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_A ) )
A__ = np.asmatrix(_A ).reshape(_A,_A )
featuremap_pooled.append(_A )
return featuremap_pooled
def UpperCamelCase ( self,__lowerCamelCase ):
# expand three-dimensional data into a one-dimensional list
A__ = []
for i in range(len(_A ) ):
A__ = np.shape(data[i] )
A__ = data[i].reshape(1,shapes[0] * shapes[1] )
A__ = data_listed.getA().tolist()[0]
data_expanded.extend(_A )
A__ = np.asarray(_A )
return data_expanded
def UpperCamelCase ( self,__lowerCamelCase ):
# expand a matrix into a one-dimensional list
A__ = np.asarray(_A )
A__ = np.shape(_A )
A__ = data_mat.reshape(1,shapes[0] * shapes[1] )
return data_expanded
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
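# Editor note: this routine spreads each pooled gradient value uniformly back
# over its pooling window, then multiplies by the sigmoid derivative
# out * (1 - out) to obtain the gradient w.r.t. the pre-pooling feature map.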
A__ = []
A__ = 0
for i_map in range(_A ):
A__ = np.ones((size_map, size_map) )
for i in range(0,_A,_A ):
for j in range(0,_A,_A ):
A__ = pd_pool[
i_pool
]
A__ = i_pool + 1
A__ = np.multiply(
_A,np.multiply(out_map[i_map],(1 - out_map[i_map]) ) )
pd_all.append(_A )
return pd_all
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=True ):
# model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_A )) )
print((''' - - Shape: Teach_Data ''', np.shape(_A )) )
A__ = 0
A__ = []
A__ = 1_0000
while rp < n_repeat and mse >= error_accuracy:
A__ = 0
print(f"-------------Learning Time {rp}--------------" )
for p in range(len(_A ) ):
# print('------------Learning Image: %d--------------'%p)
A__ = np.asmatrix(datas_train[p] )
A__ = np.asarray(datas_teach[p] )
A__ = self.convolute(
_A,self.conva,self.w_conva,self.thre_conva,conv_step=self.step_conva,)
A__ = self.pooling(_A,self.size_poolinga )
A__ = np.shape(_A )
A__ = self._expand(_A )
A__ = data_bp_input
A__ = np.dot(_A,self.vji.T ) - self.thre_bpa
A__ = self.sig(_A )
A__ = np.dot(_A,self.wkj.T ) - self.thre_bpa
A__ = self.sig(_A )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
A__ = np.multiply(
(data_teach - bp_outa),np.multiply(_A,(1 - bp_outa) ) )
A__ = np.multiply(
np.dot(_A,self.wkj ),np.multiply(_A,(1 - bp_outa) ) )
A__ = np.dot(_A,self.vji )
A__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
A__ = pd_conva_pooled.T.getA().tolist()
A__ = self._calculate_gradient_from_pool(
_A,_A,shape_featuremapa[0],shape_featuremapa[1],self.size_poolinga,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
A__ = self._expand_mat(pd_conva_all[k_conv] )
A__ = self.rate_weight * np.dot(_A,_A )
A__ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
A__ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
A__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
A__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
A__ = self.thre_bpa - pd_k_all * self.rate_thre
A__ = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
A__ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
A__ = rp + 1
A__ = error_count / patterns
all_mse.append(_A )
def draw_error():
A__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_A,'''+-''' )
plt.plot(_A,'''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_A,alpha=0.5 )
plt.show()
print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def UpperCamelCase ( self,__lowerCamelCase ):
# model predict
A__ = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_A )) )
for p in range(len(_A ) ):
A__ = np.asmatrix(datas_test[p] )
A__ = self.convolute(
_A,self.conva,self.w_conva,self.thre_conva,conv_step=self.step_conva,)
A__ = self.pooling(_A,self.size_poolinga )
A__ = self._expand(_A )
A__ = data_bp_input
A__ = bp_outa * self.vji.T - self.thre_bpa
A__ = self.sig(_A )
A__ = bp_outa * self.wkj.T - self.thre_bpa
A__ = self.sig(_A )
produce_out.extend(bp_outa.getA().tolist() )
A__ = [list(map(self.do_round,_A ) ) for each in produce_out]
return np.asarray(_A )
def UpperCamelCase ( self,__lowerCamelCase ):
# return the image data after the convolution process so it can be inspected
A__ = np.asmatrix(_A )
A__ = self.convolute(
_A,self.conva,self.w_conva,self.thre_conva,conv_step=self.step_conva,)
A__ = self.pooling(_A,self.size_poolinga )
return data_conveda, data_pooleda
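# A minimal sketch (editor addition) of what the "average_pool" branch of the
# pooling method computes for one 4x4 feature map with size_pooling=2; the
# `_demo_average_pool` name is hypothetical and not part of the original model.
def _demo_average_pool() -> None:
    fmap = np.arange(16, dtype=float).reshape(4, 4)
    # collapse each non-overlapping 2x2 block to its mean
    pooled = fmap.reshape(2, 2, 2, 2).mean(axis=(1, 3))
    assert pooled.tolist() == [[2.5, 4.5], [10.5, 12.5]]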
if __name__ == "__main__":
pass
| 193
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
_UpperCamelCase : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_UpperCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
a_ = field(default=UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."})
a_ = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
a_ = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a_ = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
a_ = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
def __UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ) -> List[Any]:
def _dataset(A : Dict , A : str=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , )
return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size )
else:
return TextDataset(
tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def __UpperCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCAmelCase_ : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
UpperCAmelCase_ : int = AutoModelWithLMHead.from_config(A )
model.resize_token_embeddings(len(A ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
UpperCAmelCase_ : List[str] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCAmelCase_ : Dict = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCAmelCase_ : str = (
get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCAmelCase_ : Any = (
get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCAmelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask(
tokenizer=A , mlm_probability=data_args.mlm_probability )
else:
UpperCAmelCase_ : List[str] = DataCollatorForLanguageModeling(
tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase_ : Any = Trainer(
model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , )
# Training
if training_args.do_train:
UpperCAmelCase_ : List[str] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=A )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase_ : Tuple = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ : Dict = trainer.evaluate()
UpperCAmelCase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
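# perplexity is the exponential of the mean cross-entropy loss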
UpperCAmelCase_ : Optional[int] = {'''perplexity''': perplexity}
UpperCAmelCase_ : int = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(A )
return results
def __UpperCAmelCase ( A : Tuple ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 304
| 0
|
'''simple docstring'''
import cva
import numpy as np
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
if k in (0.0_4, 0.0_6):
lowerCamelCase_ =k
lowerCamelCase_ =window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ):
"""simple docstring"""
return str(self.k )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =cva.imread(_A, 0 )
lowerCamelCase_ =img.shape
lowerCamelCase_ =[]
lowerCamelCase_ =img.copy()
lowerCamelCase_ =cva.cvtColor(_A, cva.COLOR_GRAY2RGB )
lowerCamelCase_ =np.gradient(_A )
lowerCamelCase_ =dx**2
lowerCamelCase_ =dy**2
lowerCamelCase_ =dx * dy
lowerCamelCase_ =0.0_4
lowerCamelCase_ =self.window_size // 2
for y in range(_A, h - offset ):
for x in range(_A, w - offset ):
lowerCamelCase_ =ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ =iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ =ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase_ =(wxx * wyy) - (wxy**2)
lowerCamelCase_ =wxx + wyy
lowerCamelCase_ =det - k * (trace**2)
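# Harris corner response: R = det(M) - k * trace(M)**2, where M is the local
# structure tensor accumulated from the windowed gradient products above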
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 255 )
return color_img, corner_list
if __name__ == "__main__":
a_ : Dict = HarrisCorner(0.04, 3)
a_ : Union[str, Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 75
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
@classmethod
def A ( cls : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(_A )
@classmethod
def A ( cls : int ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params )
UpperCAmelCase_ : str = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
UpperCAmelCase_ : int = False
return models_are_equal
@require_flax
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(_A )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) )
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
UpperCAmelCase_ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : int = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
| 304
| 0
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
_lowercase : Tuple = 'naver-clova-ix/donut-base'
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : str ):
__UpperCAmelCase = DonutProcessor.from_pretrained(_A )
def a ( self : Optional[int] ):
__UpperCAmelCase = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
__UpperCAmelCase = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
__UpperCAmelCase = self.processor.tokenajson(_A )
self.assertDictEqual(_A , _A )
| 332
|
'''simple docstring'''
_UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_UpperCamelCase : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
_UpperCamelCase : Dict = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 304
| 0
|
from ... import PretrainedConfig
__snake_case : List[Any] ={
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
snake_case_ ="""nezha"""
def __init__(self ,__lowerCamelCase=2_11_28 ,__lowerCamelCase=7_68 ,__lowerCamelCase=12 ,__lowerCamelCase=12 ,__lowerCamelCase=30_72 ,__lowerCamelCase="gelu" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.1 ,__lowerCamelCase=5_12 ,__lowerCamelCase=64 ,__lowerCamelCase=2 ,__lowerCamelCase=0.02 ,__lowerCamelCase=1e-12 ,__lowerCamelCase=0.1 ,__lowerCamelCase=0 ,__lowerCamelCase=2 ,__lowerCamelCase=3 ,__lowerCamelCase=True ,**__lowerCamelCase ,) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=_A ,bos_token_id=_A ,eos_token_id=_A ,**_A )
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Dict = hidden_size
lowerCAmelCase__ : Tuple = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : Tuple = intermediate_size
lowerCAmelCase__ : Tuple = hidden_dropout_prob
lowerCAmelCase__ : Tuple = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = max_position_embeddings
lowerCAmelCase__ : List[str] = max_relative_position
lowerCAmelCase__ : str = type_vocab_size
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : Union[str, Any] = classifier_dropout
lowerCAmelCase__ : List[str] = use_cache
| 129
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __UpperCAmelCase ( A : List[str] , A : Any , A : Optional[int] , A : Optional[int] ) -> Optional[Any]:
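# Intended behavior (editor note): pad or truncate every sequence to
# `sequence_length`, filling with `padding_value` on the requested side; e.g.
# right-padding [[1, 2], [3]] with -1 to length 3 yields [[1, 2, -1], [3, -1, -1]].
# A tuple padding value (used below for entity spans) pads pairs the same way.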
if isinstance(A , A ):
UpperCAmelCase_ : Any = np.full((len(A ), sequence_length, 2) , A )
else:
UpperCAmelCase_ : int = np.full((len(A ), sequence_length) , A )
for i, tensor in enumerate(A ):
if padding_side == "right":
if isinstance(A , A ):
UpperCAmelCase_ : Tuple = tensor[:sequence_length]
else:
UpperCAmelCase_ : Dict = tensor[:sequence_length]
else:
if isinstance(A , A ):
UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase_ : int = tensor[:sequence_length]
return out_tensor.tolist()
def __UpperCAmelCase ( A : List[Any] ) -> str:
UpperCAmelCase_ : Dict = ord(A )
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A )
if cat.startswith('''P''' ):
return True
return False
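# A small illustration (editor addition; the `_demo_punctuation` name is
# hypothetical): the codepoint ranges above cover the four ASCII punctuation
# blocks, and the Unicode category check catches non-ASCII punctuation.
def _demo_punctuation() -> None:
    assert 33 <= ord("!") <= 47  # first ASCII punctuation block
    assert unicodedata.category("¿") == "Po"  # non-ASCII punctuation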
@dataclass
class snake_case__ ( UpperCamelCase):
a_ = 42
a_ = True
a_ = None
a_ = None
a_ = -100
a_ = "pt"
def A ( self : List[Any] , _A : Dict ) -> Tuple:
import torch
UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels'''
UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase_ : Tuple = self.tokenizer.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1]
UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase_ : Optional[Any] = [
list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels
]
else:
UpperCAmelCase_ : Any = [
[self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels
]
UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features]
UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A )
UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features]
UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A )
UpperCAmelCase_ : Union[str, Any] = {k: torch.tensor(_A , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 304
| 0
|
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__snake_case = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'
def _A ( ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__UpperCamelCase = get_sagemaker_input()
else:
__UpperCamelCase = get_cluster_input()
return config
def _A ( _lowercase=None ) -> Optional[int]:
"""simple docstring"""
if subparsers is not None:
__UpperCamelCase = subparsers.add_parser('config' , description=_lowercase )
else:
__UpperCamelCase = argparse.ArgumentParser('Accelerate config command' , description=_lowercase )
parser.add_argument(
'--config_file' , default=_lowercase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_lowercase )
return parser
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = get_user_input()
if args.config_file is not None:
__UpperCamelCase = args.config_file
else:
if not os.path.isdir(_lowercase ):
os.makedirs(_lowercase )
__UpperCamelCase = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(_lowercase )
else:
config.to_yaml_file(_lowercase )
print(f'''accelerate configuration saved at {config_file}''' )
def _A ( ) -> Dict:
"""simple docstring"""
__UpperCamelCase = config_command_parser()
__UpperCamelCase = parser.parse_args()
config_command(_lowercase )
if __name__ == "__main__":
main()
| 310
|
'''simple docstring'''
import functools
def __UpperCAmelCase ( A : str , A : str ) -> int:
UpperCAmelCase_ : Optional[Any] = len(A )
UpperCAmelCase_ : List[str] = len(A )
@functools.cache
def min_distance(A : int , A : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase_ : Any = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , A ) , 1 + min_distance(A , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
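# Editor note: this is the classic Levenshtein (edit distance) recurrence with
# memoisation via functools.cache; for example "kitten" -> "sitting" costs 3
# (two substitutions plus one insertion).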
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304
| 0
|
import re
def _lowercase ( UpperCamelCase_ ) -> list:
'''simple docstring'''
return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , str_ )]
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = split_input(str_ )
return "".join(
[''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE__ = split_input(UpperCamelCase_ )
if upper:
SCREAMING_SNAKE_CASE__ = ''''''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
SCREAMING_SNAKE_CASE__ = ''''''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
return to_simple_case(UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE__ = to_simple_case(UpperCamelCase_ )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
return to_complex_case(UpperCamelCase_ , UpperCamelCase_ , '_' )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
return to_complex_case(UpperCamelCase_ , UpperCamelCase_ , '-' )
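# Worked examples (editor note; the original helper names were lost to the
# renaming above, so behavior is described rather than invoked): for the input
# "hello world" the simple case gives "HelloWorld", the snake-case helper with
# upper=False gives "hello_world", and the kebab-case helper gives "hello-world".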
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 176
|
'''simple docstring'''
def __UpperCAmelCase ( A : int = 1_0_0_0 ) -> int:
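# Editor note (Project Euler 57): each convergent of sqrt(2)'s continued
# fraction maps n/d to (n + 2d)/(n + d), since 1 + 1/(1 + n/d) = (n + 2d)/(n + d);
# the loop counts the first `n` convergents whose numerator has more digits
# than their denominator.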
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 1, 1
UpperCAmelCase_ : Dict = []
for i in range(1 , n + 1 ):
UpperCAmelCase_ : Optional[int] = prev_numerator + 2 * prev_denominator
UpperCAmelCase_ : Tuple = prev_numerator + prev_denominator
if len(str(A ) ) > len(str(A ) ):
result.append(A )
UpperCAmelCase_ : Optional[Any] = numerator
UpperCAmelCase_ : Optional[int] = denominator
return len(A )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 304
| 0
|
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> Dict:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
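# Editor note: on process p of N total, the tensor returned above is
# [p*N + 1, ..., p*N + N], so gathering across all processes reconstructs
# exactly [1, ..., N**2].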
def lowerCAmelCase_ ( __UpperCAmelCase: Union[str, Any] ) -> str:
UpperCamelCase__ : List[str] = create_tensor(__UpperCAmelCase )
UpperCamelCase__ : Any = gather(__UpperCAmelCase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def lowerCAmelCase_ ( __UpperCAmelCase: Dict ) -> Optional[Any]:
UpperCamelCase__ : Optional[Any] = [state.process_index]
UpperCamelCase__ : Any = gather_object(__UpperCAmelCase )
assert len(__UpperCAmelCase ) == state.num_processes, f"{gathered_obj}, {len(__UpperCAmelCase )} != {state.num_processes}"
assert gathered_obj == list(range(state.num_processes ) ), f"{gathered_obj} != {list(range(state.num_processes ) )}"
def lowerCAmelCase_ ( __UpperCAmelCase: Any ) -> Tuple:
UpperCamelCase__ : int = create_tensor(__UpperCAmelCase )
UpperCamelCase__ : Any = broadcast(__UpperCAmelCase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowerCAmelCase_ ( __UpperCAmelCase: Dict ) -> Union[str, Any]:
# Give the main process one extra element so that pad_across_processes
# actually has something to pad on the other processes
if state.is_main_process:
UpperCamelCase__ : int = torch.arange(state.num_processes + 1 ).to(state.device )
else:
UpperCamelCase__ : Tuple = torch.arange(state.num_processes ).to(state.device )
UpperCamelCase__ : str = pad_across_processes(__UpperCAmelCase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowerCAmelCase_ ( __UpperCAmelCase: List[Any] ) -> Union[str, Any]:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCamelCase__ : Any = create_tensor(__UpperCAmelCase )
UpperCamelCase__ : Dict = reduce(__UpperCAmelCase , '''sum''' )
UpperCamelCase__ : List[Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase ), f"{reduced_tensor} != {truth_tensor}"
def lowerCAmelCase_ ( __UpperCAmelCase: int ) -> Tuple:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCamelCase__ : Union[str, Any] = create_tensor(__UpperCAmelCase )
UpperCamelCase__ : List[str] = reduce(__UpperCAmelCase , '''mean''' )
UpperCamelCase__ : Dict = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase ), f"{reduced_tensor} != {truth_tensor}"
def lowerCAmelCase_ ( __UpperCAmelCase: Union[str, Any] ) -> List[Any]:
# For xla_spawn (TPUs)
main()
def lowerCAmelCase_ ( ) -> List[Any]:
UpperCamelCase__ : Tuple = PartialState()
state.print(f"State: {state}" )
state.print('''testing gather''' )
test_gather(__UpperCAmelCase )
state.print('''testing gather_object''' )
test_gather_object(__UpperCAmelCase )
state.print('''testing broadcast''' )
test_broadcast(__UpperCAmelCase )
state.print('''testing pad_across_processes''' )
test_pad_across_processes(__UpperCAmelCase )
state.print('''testing reduce_sum''' )
test_reduce_sum(__UpperCAmelCase )
state.print('''testing reduce_mean''' )
test_reduce_mean(__UpperCAmelCase )
if __name__ == "__main__":
main()
| 201
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case__ ( unittest.TestCase):
def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : Optional[Any] = do_center_crop
UpperCAmelCase_ : Optional[int] = crop_size
UpperCAmelCase_ : Tuple = do_normalize
UpperCAmelCase_ : Optional[Any] = image_mean
UpperCAmelCase_ : int = image_std
UpperCAmelCase_ : List[Any] = do_reduce_labels
def A ( self : Union[str, Any] ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] )
UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] )
return image, map
def __UpperCAmelCase ( ) -> Any:
UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] )
UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] )
UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] )
UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = BeitImageProcessor if is_vision_available() else None
def A ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self )
@property
def A ( self : List[Any] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''center_crop''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
def A ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , _A )
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , _A )
def A ( self : Optional[Any] ) -> Any:
pass
def A ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> str:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Any ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
UpperCAmelCase_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs()
UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def A ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
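# Hedged sketch, not part of the original tests: BEiT-style `do_reduce_labels`
# maps the background class 0 to the ignore index 255 and shifts every other
# label down by one, which is what keeps the assertions above within [0, 255].
def _reduce_labels_sketch():
    import numpy as np

    labels = np.array([[0, 1, 2], [150, 0, 3]])
    reduced = labels.copy()
    reduced[labels == 0] = 255  # background becomes the ignore index
    reduced[labels != 0] -= 1   # every real class id shifts down by one
    return reduced              # e.g. 1 -> 0, 150 -> 149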
| 304
| 0
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
snake_case_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
snake_case_ : List[Any] = object()
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Tuple = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(UpperCAmelCase_ ) - len(UpperCAmelCase_ ) + 1 ):
_UpperCamelCase : Dict = [x.match(UpperCAmelCase_ ) for x, y in zip(UpperCAmelCase_ , ks[i:] )]
if matches and all(UpperCAmelCase_ ):
return True
return False
def A__ ( UpperCAmelCase_ ):
def replace(UpperCAmelCase_ , UpperCAmelCase_ ):
for rule, replacement in rules:
if _match(UpperCAmelCase_ , UpperCAmelCase_ ):
return replacement
return val
return replace
def A__ ( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , UpperCAmelCase_ )),
(("transformer", "wte", "embedding"), P('mp' , UpperCAmelCase_ )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCAmelCase_ , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , UpperCAmelCase_ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCAmelCase_ , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , UpperCAmelCase_ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : List[Any] = _get_partition_rules()
_UpperCamelCase : Any = _replacement_rules(UpperCAmelCase_ )
_UpperCamelCase : Any = {k: _unmatched for k in flatten_dict(UpperCAmelCase_ )}
_UpperCamelCase : List[str] = {k: replace(UpperCAmelCase_ , UpperCAmelCase_ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCAmelCase_ ) )
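# Hedged, self-contained sketch of the matching scheme above (not part of the
# original module): a rule is a tuple of regexes that must match a contiguous
# run of the flattened parameter path.
def _rule_match_sketch():
    import re

    def match(qs, ks):
        qts = tuple(re.compile(x + "$") for x in qs)
        for i in range(len(ks) - len(qts) + 1):
            matches = [x.match(y) for x, y in zip(qts, ks[i:])]
            if matches and all(matches):
                return True
        return False

    path = ("transformer", "h", "0", "mlp", "c_fc", "kernel")
    return match(("c_fc", "kernel"), path)  # True -> sharded as P(None, "mp")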
| 83
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class snake_case__ ( enum.Enum):
a_ = 0
a_ = 1
a_ = 2
@add_end_docstrings(UpperCamelCase)
class snake_case__ ( UpperCamelCase):
a_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]:
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase_ : Dict = None
if self.model.config.prefix is not None:
UpperCAmelCase_ : Tuple = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params )
UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params}
def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = {}
if prefix is not None:
UpperCAmelCase_ : List[Any] = prefix
if prefix:
UpperCAmelCase_ : Tuple = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
''' [None, \'hole\']''' )
UpperCAmelCase_ : Union[str, Any] = handle_long_generation
preprocess_params.update(_A )
UpperCAmelCase_ : Optional[int] = generate_kwargs
UpperCAmelCase_ : Tuple = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase_ : List[Any] = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase_ : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase_ : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict:
return super().__call__(_A , **_A )
def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase_ : str = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens''']
else:
UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
''' model's max length''' )
UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]:
UpperCAmelCase_ : Any = model_inputs['''input_ids''']
UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = 1
else:
UpperCAmelCase_ : Optional[int] = input_ids.shape[0]
UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
UpperCAmelCase_ : Any = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0]
UpperCAmelCase_ : int = model_outputs['''input_ids''']
UpperCAmelCase_ : str = model_outputs['''prompt_text''']
UpperCAmelCase_ : Any = generated_sequence.numpy().tolist()
UpperCAmelCase_ : int = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase_ : Any = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Strip the prompt (including any XLNet / Transfo-XL padding prefix) from the decoded sequence
if input_ids is None:
UpperCAmelCase_ : List[str] = 0
else:
UpperCAmelCase_ : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:]
else:
UpperCAmelCase_ : Dict = text[prompt_length:]
UpperCAmelCase_ : List[str] = {'''generated_text''': all_text}
records.append(_A )
return records
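# Hedged usage sketch, not part of the original module; the model name is
# illustrative. The `pipeline` factory below is the standard public entry
# point that ends up constructing this text-generation pipeline.
def _text_generation_sketch():
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    # return_full_text=False maps to ReturnType.NEW_TEXT in the postprocess step above
    outputs = generator("Hello, my dog is", max_new_tokens=10, return_full_text=False)
    return outputs[0]["generated_text"]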
| 304
| 0
|
import colorsys
from PIL import Image # type: ignore
def lowerCAmelCase_ ( _snake_case : float , _snake_case : float , _snake_case : int ) -> float:
'''simple docstring'''
__magic_name__ : Dict = x
__magic_name__ : List[str] = y
for step in range(_snake_case ): # noqa: B007
__magic_name__ : Dict = a * a - b * b + x
__magic_name__ : Tuple = 2 * a * b + y
__magic_name__ : Dict = a_new
# divergence happens for all complex numbers with an absolute
# value greater than 2, i.e. when a * a + b * b > 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
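# Worked example (assuming the mangled parameters above are x, y, max_step):
# the origin never escapes, so its distance is (max_step - 1) / (max_step - 1) = 1.0,
# while the point (1.0, 1.0) escapes on the very first step (a*a + b*b = 10 > 4)
# and yields 0 / (max_step - 1) = 0.0.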
def lowerCAmelCase_ ( _snake_case : float ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowerCAmelCase_ ( _snake_case : float ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(_snake_case , 1 , 1 ) )
def lowerCAmelCase_ ( _snake_case : int = 800 , _snake_case : int = 600 , _snake_case : float = -0.6 , _snake_case : float = 0 , _snake_case : float = 3.2 , _snake_case : int = 50 , _snake_case : bool = True , ) -> Image.Image:
'''simple docstring'''
__magic_name__ : Union[str, Any] = Image.new("RGB" , (image_width, image_height) )
__magic_name__ : Any = img.load()
# loop through the image-coordinates
for image_x in range(_snake_case ):
for image_y in range(_snake_case ):
# determine the figure-coordinates based on the image-coordinates
__magic_name__ : Tuple = figure_width / image_width * image_height
__magic_name__ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
__magic_name__ : int = figure_center_y + (image_y / image_height - 0.5) * figure_height
__magic_name__ : Optional[Any] = get_distance(_snake_case , _snake_case , _snake_case )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__magic_name__ : Dict = get_color_coded_rgb(_snake_case )
else:
__magic_name__ : Tuple = get_black_and_white_rgb(_snake_case )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
snake_case : List[Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 281
|
'''simple docstring'''
from __future__ import annotations
import math
def __UpperCAmelCase ( A : int , A : int , A : bool , A : list[int] , A : float ) -> int:
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
)
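# Worked trace for the scores used in main() below (height = 3): the leaves
# [90, 23, 6, 33, 21, 65, 123, 34423] reduce to max-pairs [90, 33, 65, 34423]
# at depth 2, then min-pairs [33, 65] at depth 1, and finally max(33, 65) = 65
# at the root, so the printed optimal value is 65.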
def __UpperCAmelCase ( ) -> None:
UpperCAmelCase_ : List[str] = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
UpperCAmelCase_ : List[Any] = math.log(len(A ) , 2 )
print(F"Optimal value : {minimax(0 , 0 , A , A , A )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 304
| 0
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__lowerCamelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __snake_case ( datasets.BuilderConfig ):
lowerCAmelCase_ = None
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : "pyspark.sql.DataFrame" , __UpperCamelCase : List[int] , ) -> str:
"""simple docstring"""
import pyspark
def generate_fn():
SCREAMING_SNAKE_CASE__ = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
SCREAMING_SNAKE_CASE__ = df_with_partition_id.select("""*""" ).where(f"""part_id = {partition_id}""" ).drop("""part_id""" )
SCREAMING_SNAKE_CASE__ = partition_df.collect()
SCREAMING_SNAKE_CASE__ = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class __snake_case ( _BaseExamplesIterable ):
def __init__( self : int , _lowercase : "pyspark.sql.DataFrame" , _lowercase : Tuple=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = df
SCREAMING_SNAKE_CASE__ = partition_order or range(self.df.rdd.getNumPartitions() )
SCREAMING_SNAKE_CASE__ = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[str] ):
"""simple docstring"""
yield from self.generate_examples_fn()
def __a ( self : Optional[Any] , _lowercase : np.random.Generator ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_A )
return SparkExamplesIterable(self.df , partition_order=_A )
def __a ( self : List[Any] , _lowercase : int , _lowercase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.split_shard_indices_by_worker(_A , _A )
return SparkExamplesIterable(self.df , partition_order=_A )
@property
def __a ( self : List[str] ):
"""simple docstring"""
return len(self.partition_order )
class __snake_case ( datasets.DatasetBuilder ):
lowerCAmelCase_ = SparkConfig
def __init__( self : Tuple , _lowercase : "pyspark.sql.DataFrame" , _lowercase : str = None , _lowercase : str = None , **_lowercase : Union[str, Any] , ):
"""simple docstring"""
import pyspark
SCREAMING_SNAKE_CASE__ = pyspark.sql.SparkSession.builder.getOrCreate()
SCREAMING_SNAKE_CASE__ = df
SCREAMING_SNAKE_CASE__ = working_dir
super().__init__(
cache_dir=_A , config_name=str(self.df.semanticHash() ) , **_A , )
def __a ( self : int ):
"""simple docstring"""
def create_cache_and_write_probe(_lowercase : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=_A )
SCREAMING_SNAKE_CASE__ = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_A , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
SCREAMING_SNAKE_CASE__ = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_A ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def __a ( self : str ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def __a ( self : Optional[Any] , _lowercase : datasets.download.download_manager.DownloadManager ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __a ( self : Any , _lowercase : Dict ):
"""simple docstring"""
import pyspark
def get_arrow_batch_size(_lowercase : Tuple ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
SCREAMING_SNAKE_CASE__ = self.df.count()
SCREAMING_SNAKE_CASE__ = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
SCREAMING_SNAKE_CASE__ = (
self.df.limit(_A )
.repartition(1 )
.mapInArrow(_A , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
SCREAMING_SNAKE_CASE__ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
SCREAMING_SNAKE_CASE__ = min(_A , int(approx_total_size / max_shard_size ) )
SCREAMING_SNAKE_CASE__ = self.df.repartition(_A )
def __a ( self : str , _lowercase : str , _lowercase : str , _lowercase : int , ):
"""simple docstring"""
import pyspark
SCREAMING_SNAKE_CASE__ = ParquetWriter if file_format == '''parquet''' else ArrowWriter
SCREAMING_SNAKE_CASE__ = os.path.join(self._working_dir , os.path.basename(_A ) ) if self._working_dir else fpath
SCREAMING_SNAKE_CASE__ = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
SCREAMING_SNAKE_CASE__ = self.config.features
SCREAMING_SNAKE_CASE__ = self._writer_batch_size
SCREAMING_SNAKE_CASE__ = self._fs.storage_options
def write_arrow(_lowercase : Dict ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
SCREAMING_SNAKE_CASE__ = pyspark.TaskContext().taskAttemptId()
SCREAMING_SNAKE_CASE__ = next(_A , _A )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = writer_class(
features=_A , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
SCREAMING_SNAKE_CASE__ = pa.Table.from_batches([first_batch] )
writer.write_table(_A )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
SCREAMING_SNAKE_CASE__ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
SCREAMING_SNAKE_CASE__ = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
SCREAMING_SNAKE_CASE__ = pa.Table.from_batches([batch] )
writer.write_table(_A )
if writer._num_bytes > 0:
SCREAMING_SNAKE_CASE__ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_A ) ):
SCREAMING_SNAKE_CASE__ = os.path.join(os.path.dirname(_A ) , os.path.basename(_A ) )
shutil.move(_A , _A )
SCREAMING_SNAKE_CASE__ = (
self.df.mapInArrow(_A , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __a ( self : Optional[int] , _lowercase : "datasets.SplitGenerator" , _lowercase : str = "arrow" , _lowercase : Optional[Union[str, int]] = None , _lowercase : Optional[int] = None , **_lowercase : int , ):
"""simple docstring"""
self._validate_cache_dir()
SCREAMING_SNAKE_CASE__ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_A )
SCREAMING_SNAKE_CASE__ = not is_remote_filesystem(self._fs )
SCREAMING_SNAKE_CASE__ = os.path.join if is_local else posixpath.join
SCREAMING_SNAKE_CASE__ = '''-TTTTT-SSSSS-of-NNNNN'''
SCREAMING_SNAKE_CASE__ = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
SCREAMING_SNAKE_CASE__ = path_join(self._output_dir , _A )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for task_id, content in self._prepare_split_single(_A , _A , _A ):
(
SCREAMING_SNAKE_CASE__
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_A )
SCREAMING_SNAKE_CASE__ = total_num_examples
SCREAMING_SNAKE_CASE__ = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
SCREAMING_SNAKE_CASE__ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
SCREAMING_SNAKE_CASE__ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_lowercase : int , _lowercase : int , _lowercase : int , ):
rename(
_A , fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , f"""{global_shard_id:05d}""" ).replace("""NNNNN""" , f"""{total_shards:05d}""" ) , )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for i in range(len(_A ) ):
SCREAMING_SNAKE_CASE__ = task_id_and_num_shards[i]
for shard_id in range(_A ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_A , len(_A ) ).map(lambda _lowercase : _rename_shard(*_A ) ).collect()
else:
# don't use any pattern
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace(_A , """""" ) , )
def __a ( self : List[Any] , _lowercase : "datasets.SplitGenerator" , ):
"""simple docstring"""
return SparkExamplesIterable(self.df )
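# Hedged usage sketch, not part of the original module: the public entry point
# that exercises this builder is `Dataset.from_spark`; the toy DataFrame below
# is illustrative.
def _spark_builder_sketch():
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([("a", 1), ("b", 2)], ["text", "label"])
    return Dataset.from_spark(df)  # routed through the Spark builder above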
| 219
|
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( A : list , A : int , A : int , A : int ) -> list:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
UpperCAmelCase_ : List[Any] = result + left + right
return input_list
def __UpperCAmelCase ( A : list ) -> list:
if len(A ) <= 1:
return input_list
UpperCAmelCase_ : List[str] = list(A )
# iteration for two-way merging
UpperCAmelCase_ : Tuple = 2
while p <= len(A ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(A ) , A ):
UpperCAmelCase_ : Union[str, Any] = i
UpperCAmelCase_ : int = i + p - 1
UpperCAmelCase_ : Any = (low + high + 1) // 2
UpperCAmelCase_ : Union[str, Any] = merge(A , A , A , A )
# final merge of last two parts
if p * 2 >= len(A ):
UpperCAmelCase_ : str = i
UpperCAmelCase_ : Tuple = merge(A , 0 , A , len(A ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
_UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
_UpperCamelCase : List[str] = []
else:
_UpperCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
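# Worked example of the intended bottom-up passes on [5, 2, 4, 1]:
#   p = 2 merges adjacent pairs -> [2, 5, 1, 4]
#   p = 4 merges the two halves -> [1, 2, 4, 5]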
| 304
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCamelCase_ : List[Any] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowerCamelCase_ : List[str] = tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowerCamelCase_ : Union[str, Any] = tf_top_k_top_p_filtering(_A , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
lowerCamelCase_ : Optional[int] = output[output != -float('''inf''' )]
lowerCamelCase_ : Optional[Any] = tf.cast(
tf.where(tf.not_equal(_A , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_A , _A , rtol=1E-12 )
tf.debugging.assert_equal(_A , _A )
@require_tf
class __lowercase ( unittest.TestCase , _lowercase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowerCamelCase : Union[str, Any] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def UpperCAmelCase__ (self ):
# TF-only test: tf.saved_model export
lowerCamelCase_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase_ : Optional[int] = 2
lowerCamelCase_ : Any = 2
class __lowercase ( tf.Module ):
def __init__(self , A ):
super(_A , self ).__init__()
lowerCamelCase_ : Optional[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_A , )
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : int = self.model.generate(
input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , )
return {"sequences": outputs["sequences"]}
lowerCamelCase_ : str = [[2, 0], [1_0_2, 1_0_3]]
lowerCamelCase_ : Optional[int] = [[1, 0], [1, 1]]
lowerCamelCase_ : Tuple = DummyModel(model=_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_A , _A , signatures={'''serving_default''': dummy_model.serving} )
lowerCamelCase_ : Union[str, Any] = tf.saved_model.load(_A ).signatures['''serving_default''']
for batch_size in range(1 , len(_A ) + 1 ):
lowerCamelCase_ : Optional[Any] = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCamelCase_ : Any = serving_func(**_A )['''sequences''']
lowerCamelCase_ : int = test_model.generate(**_A , max_new_tokens=_A )
tf.debugging.assert_equal(_A , _A )
@slow
def UpperCAmelCase__ (self ):
# TF-only test: tf.saved_model export
lowerCamelCase_ : Any = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase_ : List[str] = 1
lowerCamelCase_ : Union[str, Any] = 2
class __lowercase ( tf.Module ):
def __init__(self , A ):
super(_A , self ).__init__()
lowerCamelCase_ : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_A , )
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : Optional[Any] = self.model.generate(
input_ids=_A , attention_mask=_A , max_new_tokens=_A , return_dict_in_generate=_A , )
return {"sequences": outputs["sequences"]}
lowerCamelCase_ : Optional[Any] = [[2], [1_0_2, 1_0_3]]
lowerCamelCase_ : Optional[Any] = [[1], [1, 1]]
lowerCamelCase_ : Optional[Any] = DummyModel(model=_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_A , _A , signatures={'''serving_default''': dummy_model.serving} )
lowerCamelCase_ : Union[str, Any] = tf.saved_model.load(_A ).signatures['''serving_default''']
for input_row in range(len(_A ) ):
lowerCamelCase_ : Optional[int] = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCamelCase_ : List[Any] = serving_func(**_A )['''sequences''']
lowerCamelCase_ : int = test_model.generate(**_A , max_new_tokens=_A )
tf.debugging.assert_equal(_A , _A )
@slow
@require_tensorflow_text
def UpperCAmelCase__ (self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_A )
class __lowercase ( tf.keras.layers.Layer ):
def __init__(self ):
super().__init__()
lowerCamelCase_ : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_A , '''spiece.model''' ) , '''rb''' ).read() )
lowerCamelCase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def UpperCAmelCase__ (self , A , *A , **A ):
lowerCamelCase_ : Optional[Any] = self.tokenizer.tokenize(_A )
lowerCamelCase_ : Dict = text.pad_model_inputs(
_A , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
lowerCamelCase_ : Optional[int] = self.model.generate(input_ids=_A , attention_mask=_A )
return self.tokenizer.detokenize(_A )
lowerCamelCase_ : List[Any] = CompleteSentenceTransformer()
lowerCamelCase_ : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
lowerCamelCase_ : str = complete_model(_A )
lowerCamelCase_ : Optional[Any] = tf.keras.Model(_A , _A )
keras_model.save(_A )
def UpperCAmelCase__ (self ):
# Has PT equivalent: this test relies on random sampling
lowerCamelCase_ : Tuple = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
lowerCamelCase_ : Union[str, Any] = 1_4
lowerCamelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase_ : Any = '''Hello, my dog is cute and'''
lowerCamelCase_ : Tuple = tokenizer(_A , return_tensors='''tf''' )
lowerCamelCase_ : int = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase_ : Any = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**_A , eos_token_id=_A , **_A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCamelCase_ : Any = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowerCamelCase_ : Dict = model.generate(**_A , eos_token_id=_A , **_A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def UpperCAmelCase__ (self ):
# Has PT equivalent: ample use of framework-specific code
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCamelCase_ : Optional[int] = '''Hugging Face is a technology company based in New York and Paris.'''
lowerCamelCase_ : Any = bart_tokenizer(_A , return_tensors='''tf''' ).input_ids
lowerCamelCase_ : Dict = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCamelCase_ : Any = bart_model.generate(_A ).numpy()
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self , A , A=None , **A ):
return super().call(_A , **_A )
lowerCamelCase_ : List[Any] = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowerCamelCase_ : int = bart_model.generate(_A , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_A , _A ) )
class __lowercase ( bart_model.model.encoder.__class__ ):
def UpperCAmelCase__ (self , A , **A ):
return super().call(_A , **_A )
lowerCamelCase_ : List[str] = FakeEncoder(bart_model.config , bart_model.model.shared )
lowerCamelCase_ : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCamelCase_ : Tuple = bart_model.generate(_A ).numpy()
with self.assertRaises(_A ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_A , foo='''bar''' )
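# Hedged numpy sketch, not part of the original tests: the essence of the
# top-k/top-p filtering checked above. It mirrors the behaviour (keep the k
# largest logits and the smallest prefix whose probability mass reaches p,
# mask the rest with -inf) but omits details such as min_tokens_to_keep.
def _top_k_top_p_sketch(logits, top_k=10, top_p=0.6):
    import numpy as np

    logits = np.asarray(logits, dtype=np.float64).copy()
    top_k = min(top_k, logits.size)
    kth_value = np.sort(logits)[-top_k]
    logits[logits < kth_value] = -np.inf  # top-k cut
    order = np.argsort(logits)[::-1]
    probs = np.exp(logits[order] - np.max(logits))
    probs /= probs.sum()
    cutoff = np.searchsorted(np.cumsum(probs), top_p) + 1  # keep the boundary token
    logits[order[cutoff:]] = -np.inf      # top-p cut
    return logits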
| 318
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
a_ = 42 # [batch_size x 3]
a_ = 42 # [batch_size x 3]
a_ = 42 # [batch_size x 3]
a_ = 42 # [batch_size x 3]
a_ = 42
a_ = 42
a_ = 42
a_ = 42
a_ = 42
def A ( self : Tuple ) -> Optional[int]:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def A ( self : List[Any] ) -> Union[str, Any]:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def A ( self : Any ) -> Optional[Any]:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def A ( self : Optional[int] ) -> torch.Tensor:
UpperCAmelCase_ : Dict = torch.arange(self.height * self.width )
UpperCAmelCase_ : int = torch.stack(
[
pixel_indices % self.width,
torch.div(_A , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def A ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ , *UpperCAmelCase_ : Union[str, Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(_A ) )
UpperCAmelCase_ : Any = self.get_image_coords()
UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A )
UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def A ( self : Optional[int] , _A : torch.Tensor ) -> torch.Tensor:
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 )
UpperCAmelCase_ : Union[str, Any] = self.resolution()
UpperCAmelCase_ : int = self.fov()
UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(_A , 1 , 3 )
+ self.x.view(_A , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A )
UpperCAmelCase_ : Union[str, Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_A , *_A , 2 , 3 )
def A ( self : Tuple , _A : int , _A : int ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , )
def __UpperCAmelCase ( A : int ) -> DifferentiableProjectiveCamera:
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : str = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : str = np.array([np.sin(A ), np.cos(A ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : Optional[int] = -z * 4
UpperCAmelCase_ : Optional[int] = np.array([np.cos(A ), -np.sin(A ), 0.0] )
UpperCAmelCase_ : List[Any] = np.cross(A , A )
origins.append(A )
xs.append(A )
ys.append(A )
zs.append(A )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A , axis=0 ) ).float() , width=A , height=A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A )) , )
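# Usage note (hedged): the module-level helper above builds 20 cameras evenly
# spaced on a circle of radius 4 around the origin, each oriented toward the
# scene with a ~0.7 rad field of view; `camera_rays` then yields one
# (origin, direction) pair per pixel, shaped [..., 2, 3], ready for ray marching.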
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a__: Union[str, Any] = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__: List[Any] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__: Optional[int] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
a__: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
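# Hedged self-contained sketch, not part of the original module: the core idea
# behind _LazyModule is deferring the real import until an attribute is first
# requested, so heavy backends (torch, flax) load only when actually used.
def _lazy_attr_sketch():
    import importlib

    import_structure = {"json": ["dumps"]}  # toy stand-in for the map above
    attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
    cache = {}

    def lazy_get(attr):
        if attr not in cache:
            module = importlib.import_module(attr_to_module[attr])
            cache[attr] = getattr(module, attr)  # the import happens only here
        return cache[attr]

    return lazy_get("dumps")({"ok": True})  # json is imported only now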
| 193
|
'''simple docstring'''
import random
class snake_case__ :
@staticmethod
def A ( _A : str ) -> tuple[list[int], list[int]]:
UpperCAmelCase_ : Dict = [ord(_A ) for i in text]
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Any = []
for i in plain:
UpperCAmelCase_ : int = random.randint(1 , 3_00 )
UpperCAmelCase_ : str = (i + k) * k
cipher.append(_A )
key.append(_A )
return cipher, key
@staticmethod
def A ( _A : list[int] , _A : list[int] ) -> str:
UpperCAmelCase_ : Dict = []
for i in range(len(_A ) ):
UpperCAmelCase_ : int = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(_A ) )
return "".join(_A )
if __name__ == "__main__":
_UpperCamelCase , _UpperCamelCase : Any = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
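# Worked example of the arithmetic above: for the plaintext byte
# p = ord("H") = 72 and a drawn key k = 10, the ciphertext is
# c = (p + k) * k = 820; decryption computes (c - k**2) / k = (820 - 100) / 10 = 72,
# recovering "H" exactly.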
| 304
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def a_ ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Any =StableDiffusionLatentUpscalePipeline
lowercase : str =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
lowercase : Optional[Any] =PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
lowercase : Tuple =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase : int =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase : int =frozenset([] )
lowercase : List[str] =True
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =1
lowerCamelCase_ =4
lowerCamelCase_ =(16, 16)
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(_A )
return image
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDConditionModel(
act_fn='''gelu''', attention_head_dim=8, norm_num_groups=_A, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
), in_channels=8, mid_block_type=_A, only_cross_attention=_A, out_channels=5, resnet_time_scale_shift='''scale_shift''', time_embedding_type='''fourier''', timestep_post_act='''gelu''', up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D'''), )
lowerCamelCase_ =AutoencoderKL(
block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
lowerCamelCase_ =EulerDiscreteScheduler(prediction_type='''sample''' )
lowerCamelCase_ =CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='''quick_gelu''', projection_dim=512, )
lowerCamelCase_ =CLIPTextModel(_A )
lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ ={
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(_A )
else:
lowerCamelCase_ =torch.Generator(device=_A ).manual_seed(_A )
lowerCamelCase_ ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowerCamelCase_ =self.get_dummy_inputs(_A )
lowerCamelCase_ =pipe(**_A ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 256, 256, 3) )
lowerCamelCase_ =np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
lowerCamelCase_ =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A, 1e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =[
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowerCamelCase_ =self.get_dummy_inputs(_A )
lowerCamelCase_ =2
lowerCamelCase_ =[]
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# these schedulers do not support the sigma schedule used by this
# pipeline, so they are skipped
continue
lowerCamelCase_ =getattr(_A, scheduler_enum.name )
lowerCamelCase_ =scheduler_cls.from_config(pipe.scheduler.config )
lowerCamelCase_ =pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch.manual_seed(33 )
lowerCamelCase_ =StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''', torch_dtype=torch.floataa )
pipe.to('''cuda''' )
lowerCamelCase_ =StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
lowerCamelCase_ ='''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
lowerCamelCase_ =pipe(_A, generator=_A, output_type='''latent''' ).images
lowerCamelCase_ =upscaler(
prompt=_A, image=_A, num_inference_steps=20, guidance_scale=0, generator=_A, output_type='''np''', ).images[0]
lowerCamelCase_ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch.manual_seed(33 )
lowerCamelCase_ =StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
lowerCamelCase_ ='''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
lowerCamelCase_ =upscaler(
prompt=_A, image=_A, num_inference_steps=20, guidance_scale=0, generator=_A, output_type='''np''', ).images[0]
lowerCamelCase_ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
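# Design note (hedged): the two-stage flow exercised above hands the base
# pipeline's output to the upscaler as latents (output_type="latent"), avoiding
# a VAE decode/re-encode round trip; guidance_scale=0 effectively disables
# classifier-free guidance during the upscaling stage.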
| 75
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = ReformerTokenizer
a_ = ReformerTokenizerFast
a_ = True
a_ = False
a_ = True
def A ( self : Optional[Any] ) -> List[Any]:
super().setUp()
UpperCAmelCase_ : Tuple = ReformerTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : List[Any] = '''<s>'''
UpperCAmelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_A ) , 10_00 )
def A ( self : Optional[int] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def A ( self : Optional[Any] ) -> List[Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(_A )
UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def A ( self : Tuple , _A : Dict=15 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A )
# Simple input
UpperCAmelCase_ : Optional[int] = '''This is a simple input'''
UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : Dict = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
def A ( self : Union[str, Any] ) -> int:
pass
def A ( self : int ) -> Any:
UpperCAmelCase_ : Any = ReformerTokenizer(_A , keep_accents=_A )
UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def A ( self : List[str] ) -> Optional[int]:
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def A ( self : str ) -> str:
UpperCAmelCase_ : Tuple = '''Hello World!'''
UpperCAmelCase_ : int = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def A ( self : List[Any] ) -> str:
UpperCAmelCase_ : Tuple = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
UpperCAmelCase_ : int = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def A ( self : List[str] ) -> Optional[int]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
UpperCAmelCase_ : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ : List[Any] = ''' '''.join(_A )
UpperCAmelCase_ : str = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' )
UpperCAmelCase_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
UpperCAmelCase_ : List[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape
UpperCAmelCase_ : Optional[int] = ReformerModel(_A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def A ( self : int ) -> Optional[Any]:
# fmt: off
UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ : Optional[Any] = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
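# Usage sketch (illustrative addition, not part of the test file): round-trip a string
# through the pretrained checkpoint exercised by the slow tests above.
if __name__ == "__main__":
    tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tokenizer.encode("Hello World!")
    print(ids)  # [126, 32, 262, 152, 38, 72, 287] per the slow test above
    print(tokenizer.decode(ids))  # should round-trip to "Hello World!"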
| 304
| 0
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate the solution of dy/dx = ode_func(x, y) with the forward Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
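# Worked example (illustrative addition, not in the original file): solve dy/dx = y
# with y(0) = 1 on [0, 1]. The exact solution is e^x, and forward Euler's global
# error is O(step_size), so y(1) lands near but below e ~ 2.71828.
if __name__ == "__main__":
    approximation = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approximation[-1])  # ~2.7048 with step_size=0.01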
| 332
|
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    # map each lowercase letter to its 1-based position in the alphabet (a=1 ... z=26)
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    # map each integer back to its lowercase letter
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
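# Round-trip example (illustrative addition): the a=1 ... z=26 mapping above assumes
# lowercase letters only; anything else falls outside the 1-26 range.
if __name__ == "__main__":
    assert encode("abc") == [1, 2, 3]
    assert decode([20, 5, 19, 20]) == "test"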
| 304
| 0
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a fairseq/metaseq checkpoint and remap its keys to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split the fused QKV weight into separate Q, K, V projections
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Load the remapped weights into an OPTModel and save it in HF format."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
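# Example invocation (illustrative addition; the script filename follows the usual
# transformers naming convention for conversion scripts and is an assumption):
#
#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#       --fairseq_path /path/to/restored.pt \
#       --pytorch_dump_folder_path ./opt-hf \
#       --hf_config facebook/opt-350m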
| 129
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 304
| 0
|
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: find the longest palindromic substring in linear time."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
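# Example (illustrative addition): Manacher's algorithm stays O(n) because each center
# reuses the mirrored palindrome length inside the current rightmost palindrome
# instead of re-expanding from scratch.
if __name__ == "__main__":
    assert palindromic_string("abbbaba") == "abbba"
    assert palindromic_string("forgeeksskeegfor") == "geeksskeeg"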
| 310
|
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    """Return the n-bit Gray code sequence as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the bit strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the n-bit Gray code sequence as bit strings."""
    # The approach is a recursive one: base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # length of the sequence; 1 << n is equivalent to 2^n

    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # prefix 0 to the first half of the smaller sequence
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # prefix 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
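# Example (illustrative addition): 2-bit Gray code; consecutive entries differ in
# exactly one bit, which is what the reflect-and-prefix recursion above guarantees.
if __name__ == "__main__":
    assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
    assert gray_code(2) == [0, 1, 3, 2]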
| 304
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
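# Usage sketch (illustrative addition): build a small config for experimentation;
# any argument left out keeps the NLLB-MoE-54B defaults listed above.
if __name__ == "__main__":
    config = NllbMoeConfig(encoder_layers=2, decoder_layers=2, num_experts=4)
    print(config.num_experts, config.router_dtype)  # 4 float32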
| 176
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 304
| 0
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 201
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def A ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : str , _A : List[str] , _A : Any=0 ) -> str:
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Any = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : int = Image.fromarray(np.uint8(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Dict = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any:
UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uint8(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : List[str] ) -> Optional[Any]:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Any = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase_ : str = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0]
UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max()
self.assertLess(_A , 1e-4 )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : Optional[Any] = '''cpu'''
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
UpperCAmelCase_ : int = pipe.generate_mask(**_A )
UpperCAmelCase_ : Tuple = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase_ : List[Any] = np.array([0] * 9 )
UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A ( self : str ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = '''cpu'''
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : int = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
def A ( self : Tuple ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : Any = '''cpu'''
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A )
UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : Optional[Any] ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A ( cls : Dict ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
UpperCAmelCase_ : Any = raw_image
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=None , torch_dtype=torch.float16 )
UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Tuple = '''a bowl of pears'''
UpperCAmelCase_ : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[str] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
UpperCAmelCase_ : Any = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=None , torch_dtype=torch.float16 )
UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Dict = '''a bowl of pears'''
UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[Any] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
UpperCAmelCase_ : Dict = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : Tuple = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 304
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """Transform, generate, reverse, save/reload, and check outputs still match."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """Saving a transformed model must fail until it is reversed."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
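# Usage sketch (illustrative addition): the transform/reverse cycle covered by the
# tests above. to_bettertransformer() swaps supported layers for fused kernels via
# optimum, and reverse_bettertransformer() must be called before save_pretrained().
if __name__ == "__main__":
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = model.to_bettertransformer()  # fast inference path
    model = model.reverse_bettertransformer()  # back to canonical weights
    model.save_pretrained("tiny-t5-roundtrip")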
| 83
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]:
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Tuple = patch_sizes
UpperCAmelCase_ : int = patch_stride
UpperCAmelCase_ : Any = patch_padding
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Tuple = stride_kv
UpperCAmelCase_ : Optional[Any] = depth
UpperCAmelCase_ : Dict = cls_token
UpperCAmelCase_ : Dict = attention_drop_rate
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ) -> int:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def A ( self : Dict , _A : List[Any] , _A : Tuple , _A : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = CvtModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Tuple = model(_A )
UpperCAmelCase_ : List[str] = (self.image_size, self.image_size)
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase_ : int = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def A ( self : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : str = CvtForImageClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : int = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Dict ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs
UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def A ( self : Any ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : int ) -> List[str]:
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def A ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def A ( self : Any ) -> Optional[Any]:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def A ( self : List[Any] ) -> Any:
pass
def A ( self : int ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_A )
UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : Dict ) -> List[str]:
def check_hidden_states_output(_A : Dict , _A : str , _A : int ):
UpperCAmelCase_ : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
UpperCAmelCase_ : Any = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Dict = True
check_hidden_states_output(_A , _A , _A )
def A ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def A ( self : Optional[int] ) -> int:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def A ( self : str ) -> str:
UpperCAmelCase_ : str = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_A )
# verify the logits
UpperCAmelCase_ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 281
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
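# Usage sketch (illustrative addition): the template simply maps the configured text
# column onto the canonical "text" feature expected by language-modeling consumers.
if __name__ == "__main__":
    task = LanguageModeling()
    print(task.column_mapping)  # {'text': 'text'}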
| 304
| 0
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
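# Alternative sketch (illustrative addition, not from the original script): tweepy's
# Cursor helper can replace the manual max_id pagination loop above; the same
# 200-tweets-per-page cap still applies.
#
#   for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#       print(tweet.id_str, tweet.created_at, tweet.text)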
| 219
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class snake_case__ ( unittest.TestCase):
def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Optional[int] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
UpperCAmelCase_ : str = class_info_file
UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
UpperCAmelCase_ : Tuple = num_text
UpperCAmelCase_ : Union[str, Any] = repo_path
# for the post_process_functions
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Dict = 10
UpperCAmelCase_ : int = 10
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
UpperCAmelCase_ : str = ignore_index
def A ( self : Dict ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]:
if not batched:
UpperCAmelCase_ : Any = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ : int = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
UpperCAmelCase_ : str = self.size['''shortest_edge''']
else:
UpperCAmelCase_ : Dict = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ : int = max(_A , key=lambda _A : item[0] )[0]
UpperCAmelCase_ : List[str] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
def A ( self : Tuple ) -> str:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ = image_processing_class
def A ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )
@property
def A ( self : Any ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(self , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(high ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation ) for annotation in annotations]
        inputs = image_processor(
            image_inputs , ['''semantic'''] * len(image_inputs ) , annotations , return_tensors='''pt''' , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )
        return inputs
def A ( self : int ) -> str:
pass
    def test_call_with_segmentation_maps(self ):
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
            mask_labels = inputs['''mask_labels''']
            class_labels = inputs['''class_labels''']
            pixel_values = inputs['''pixel_values''']
            text_inputs = inputs['''text_inputs''']
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
        common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type='''pil''' )
        common(is_instance_map=True , segmentation_type='''pil''' )
    def test_binary_mask_to_rle(self ):
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
    def test_post_process_semantic_segmentation(self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
    def test_post_process_instance_segmentation(self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation(self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
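# For reference, a minimal numpy sketch of the (start, length) run-length encoding that the
# assertions in test_binary_mask_to_rle imply; the real `binary_mask_to_rle` in transformers
# may differ in details, so treat this as an illustration only.
def binary_mask_to_rle_sketch(mask):
    # Row-major flatten, then record each run of ones as a (1-based start, length) pair.
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    changes = np.where(pixels[1:] != pixels[:-1])[0] + 1
    changes[1::2] -= changes[::2]  # turn run end positions into run lengths
    return list(changes)
# e.g. the 20x50 mask built in the test above yields [21, 45, 251, 10].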
| 304
| 0
|
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('''T''')
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
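# Hedged usage sketch: the aliases above let signatures accept flexible containers.
# The function below is hypothetical and not part of the module.
def to_paths(raw: NestedDataStructureLike[PathLike]) -> List[str]:
    if isinstance(raw, dict):
        return [os.fspath(p) for p in raw.values()]
    if isinstance(raw, (list, tuple)):
        return [os.fspath(p) for p in raw]
    return [os.fspath(raw)]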
| 318
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , identifier )
    return [m.group(0 ) for m in matches]
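# For example, camel_case_split("TFBertModel") yields ["TF", "Bert", "Model"]: the regex
# breaks before a lower->upper transition and before an uppercase run followed by Upper+lower.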
def get_frameworks_table():
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config', ''): model_type for model_type, config in config_mapping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'
    data['processor'] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
        auto_classes = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata', 'pipeline_tags.json', repo_type='dataset', token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        })
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, 'frameworks.json'))
        tags_dataset.to_json(os.path.join(tmp_dir, 'pipeline_tags.json'))
        if commit_sha is not None:
            commit_message = (
                F"Update with commit {commit_sha}\n\nSee: "
                F"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = 'Update'
        upload_folder(
            repo_id='huggingface/transformers-metadata', folder_path=tmp_dir, repo_type='dataset', token=token, commit_message=commit_message, )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ', '.join(missing)
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            F"`utils/update_metadata.py`: {msg}. Please add them!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
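# Example invocations (token and sha values are placeholders):
#   python utils/update_metadata.py --check-only
#   python utils/update_metadata.py --token <hf_token> --commit_sha <commit_sha>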
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
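# Hedged illustration of the lazy-import effect (the class name is real, the timing claim is ours):
#   from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
# Only at this access is modeling_vision_encoder_decoder actually imported; until then the
# package exposes the names listed in _import_structure without paying the framework import cost.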
| 193
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
a_ = field(default=UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."})
a_ = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
a_ = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a_ = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
a_ = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def __UpperCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCAmelCase_ : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
UpperCAmelCase_ : int = AutoModelWithLMHead.from_config(A )
model.resize_token_embeddings(len(A ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
UpperCAmelCase_ : List[str] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCAmelCase_ : Dict = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCAmelCase_ : str = (
get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCAmelCase_ : Any = (
get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCAmelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask(
tokenizer=A , mlm_probability=data_args.mlm_probability )
else:
UpperCAmelCase_ : List[str] = DataCollatorForLanguageModeling(
tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase_ : Any = Trainer(
model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , )
# Training
if training_args.do_train:
UpperCAmelCase_ : List[str] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=A )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase_ : Tuple = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ : Dict = trainer.evaluate()
UpperCAmelCase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
UpperCAmelCase_ : Optional[int] = {'''perplexity''': perplexity}
UpperCAmelCase_ : int = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(A )
return results
def __UpperCAmelCase ( A : Tuple ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
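# A hypothetical invocation (paths are placeholders; roberta-base requires the --mlm flag per the check above):
#   python run_language_modeling.py \
#     --model_name_or_path roberta-base --mlm \
#     --train_data_file train.txt --eval_data_file eval.txt \
#     --do_train --do_eval --output_dir ./lm_out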
| 304
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bigbird_pegasus'] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 75
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
@classmethod
    def setUpClass(cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass(cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def check_models_equal(model_1 , model_2 ):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params )
    flat_params_2 = flatten_dict(model_2.params )
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(_A )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) )
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
UpperCAmelCase_ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : int = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
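# Minimal usage sketch for the check_models_equal helper above (repo names are placeholders):
#   model_a = FlaxBertModel.from_pretrained("user/repo-a")
#   model_b = FlaxBertModel.from_pretrained("user/repo-b")
#   assert check_models_equal(model_a, model_b)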
| 304
| 0
|
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the (k, o, q, v) attention kernels for block i of the given stack."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP kernels; v1.1 checkpoints have a gated wi_0/wi_1 pair instead of wi."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a PyTorch state dict from the converted numpy arrays."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
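# A hypothetical invocation (all paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file t5_config.json \
#     --pytorch_dump_path ./t5_pytorch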
| 332
|
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
    'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tapas'] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_tapas'] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        truncated = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(truncated), :2] = truncated
            else:
                out_tensor[i, : len(truncated)] = truncated
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(truncated) :, :2] = truncated
            else:
                out_tensor[i, sequence_length - len(truncated) :] = truncated
    return out_tensor.tolist()
def is_punctuation(char) -> bool:
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('''P''' ):
        return True
    return False
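# e.g. is_punctuation("!") and is_punctuation("¿") are True (the latter via the unicodedata
# category check, since "¿" is outside the ASCII ranges), while is_punctuation("a") is False.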
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids'''] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
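# Hedged usage sketch (tokenizer and features are placeholders; the class name follows the
# LUKE research example this collator appears to come from):
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer, label_pad_token_id=-100)
#   batch = collator(features)  # pads labels, ner_tags and original_entity_spans to one length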
| 304
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there', return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am', return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ), labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
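        # Note: the HF loss is the mean cross-entropy per target token, so multiplying by the
        # target length and negating recovers the summed log-likelihood log p(labels).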
| 310
|
'''simple docstring'''
import functools
def edit_distance(word1: str, word2: str) -> int:
    """Levenshtein distance: minimum number of insertions, deletions and substitutions."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word is exhausted - insert all remaining characters of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word is exhausted - delete all remaining characters of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
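# For example, edit_distance("kitten", "sitting") == 3: substitute k->s, substitute e->i, insert g.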
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
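# Hedged usage sketch (the checkpoint name is a real Chinese-CLIP repo, but treat it as a placeholder):
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫"], images=image, return_tensors="pt")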
| 176
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Project Euler 57: count the first n expansions of the continued fraction for sqrt(2)
    whose numerator has more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(f'''{solution() = }''')
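# The first expansions are 3/2, 7/5, 17/12, 41/29, ...; the 8th, 1393/985, is the first whose
# numerator has more digits than its denominator, so solution(8) == 1 (and solution() == 153).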
| 304
| 0
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : List[Any] = VOCAB_FILES_NAMES
a : Dict = PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : int = ["input_ids", "attention_mask"]
def __init__( self, __magic_name__, __magic_name__=False, __magic_name__=False, __magic_name__=False, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__ = None, **__magic_name__, ) -> None:
"""simple docstring"""
UpperCamelCase__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase__ : Dict = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' you are testing the model, this can safely be ignored''' )
UpperCamelCase__ : str = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCamelCase__ : Union[str, Any] = '''<|endoftext|>''' if eos_token is None else eos_token
UpperCamelCase__ : Union[str, Any] = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCamelCase__ : Optional[int] = unk_token if pad_token is None else pad_token
UpperCamelCase__ : Dict = eos_token if bos_token is None else bos_token
else:
UpperCamelCase__ : Any = '''<pad>''' if pad_token is None else pad_token
UpperCamelCase__ : Any = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=_A, remove_space=_A, keep_accents=_A, bos_token=_A, eos_token=_A, unk_token=_A, pad_token=_A, sp_model_kwargs=self.sp_model_kwargs, **_A, )
UpperCamelCase__ : int = do_lower_case
UpperCamelCase__ : Union[str, Any] = remove_space
UpperCamelCase__ : Optional[Any] = keep_accents
UpperCamelCase__ : Dict = vocab_file
UpperCamelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9 ) ) + list(range(11, 32 ) ) + list(range(127, 160 ) ) + [160, 173, 8203] ) )}]" )
def __getstate__( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.__dict__.copy()
UpperCamelCase__ : List[str] = None
return state
def __setstate__( self, __magic_name__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Any = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
UpperCamelCase__ : Optional[int] = {}
UpperCamelCase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
    def preprocess_text( self, text ) -> str:
        """Strips non-printing characters, then normalizes whitespace and unicode."""
        text = self.non_printing_characters_re.sub('''''', text )
        # Normalize whitespaces
        text = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('''NFC''', text )
        return text
    def _tokenize( self, text, **kwargs ) -> List[str]:
        """Tokenizes the preprocessed text with the SentencePiece model."""
        text = self.preprocess_text(text )
        return self.sp_model.encode(text, out_type=str )
def UpperCamelCase__ ( self, __magic_name__ ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(_A )
def UpperCamelCase__ ( self, __magic_name__ ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(_A )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ) -> str:
"""simple docstring"""
return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: List[int]) -> str:
        return self.sp_model.decode(token_ids)
    def _build_conversation_input_ids(self, conversation) -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
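
# Illustrative sketch (not part of the original file): shows the chat-prompt
# format produced by _build_conversation_input_ids above. The turn texts are
# made up; eos/bos match the non-7b defaults set in __init__.
def _example_chat_prompt() -> str:
    eos_token, bos_token = "<|endoftext|>", "<s>"
    turns = [(True, "Hej!"), (False, "Hej hej!")]  # (is_user, text) pairs
    all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in turns]
    # eos + bos, turns joined by bos, then a trailing "Bot:" cue for generation
    return f"{eos_token}{bos_token}" + f"{bos_token}".join(all_responses) + f"{bos_token}Bot:"
    # -> "<|endoftext|><s>User: Hej!<s>Bot: Hej hej!<s>Bot:"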
| 201
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # background label should be replaced by the ignore index when reduce_labels is set
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
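
# Illustrative sketch (not part of the original tests): `do_reduce_labels`
# above maps ADE20k's background class (0) to the ignore index 255 and shifts
# the remaining labels down by one -- assuming this is the mapping
# BeitImageProcessor applies when the flag is set.
def _reduce_labels_reference(label):
    label = np.array(label).copy()
    label[label == 0] = 255    # background becomes the ignore index
    label = label - 1          # shift the remaining classes down by one
    label[label == 254] = 255  # keep former-background pixels at the ignore index
    return label
# _reduce_labels_reference([0, 1, 2, 150]) -> array([255, 0, 1, 149])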
| 304
| 0
|
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text: each code point p becomes c = (p + k) * k for a random k."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt by inverting encrypt: p = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 83
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
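
# Minimal usage sketch for the pipeline above (not part of the original file);
# "gpt2" is just an example checkpoint:
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     full = generator("Hello, I am", return_full_text=True)       # ReturnType.FULL_TEXT
#     new_only = generator("Hello, I am", return_full_text=False)  # ReturnType.NEW_TEXT
#     ids = generator("Hello, I am", return_tensors=True)          # ReturnType.TENSORS
#
# The three keyword arguments are resolved to ReturnType members in
# _sanitize_parameters above.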
| 304
| 0
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
    def test_keras_fit(self):
        super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 281
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player (maximizer or minimizer)."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 304
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
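
# Illustrative sketch (not in the original file): PretrainedConfig resolves
# generic attribute names through `attribute_map`, so the common accessors
# transparently read TrOCR's decoder-specific fields:
#
#     config = TrOCRConfig()
#     assert config.hidden_size == config.d_model == 1024
#     assert config.num_attention_heads == config.decoder_attention_heads == 16
#     assert config.num_hidden_layers == config.decoder_layers == 12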
| 219
|
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort input_list[low : high + 1] in place from its two sorted halves."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of input_list using bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
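

# Small usage check (not in the original): `merge` stitches two sorted halves
# of a slice together, and iter_merge_sort doubles the run length p each pass.
def _iter_merge_sort_demo() -> None:
    assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
    assert iter_merge_sort([1]) == [1]
    assert iter_merge_sort([]) == []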
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 304
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
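
# Toy sketch of the lazy-import pattern used above (not part of this file):
# transformers' _LazyModule automates a module-level __getattr__ that defers
# each submodule import until one of its symbols is first accessed. Module
# names below are hypothetical:
#
#     import importlib
#
#     _import_structure = {"tokenization_foo": ["FooTokenizer"]}
#
#     def __getattr__(name):
#         for module_name, symbols in _import_structure.items():
#             if name in symbols:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(name)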
| 318
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
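

# Usage sketch for the camera above (not in the original file):
# create_pan_cameras builds 20 poses orbiting the origin, and `camera_rays`
# yields an (origin, direction) pair per pixel per pose.
def _ray_shape_demo() -> None:
    camera = create_pan_cameras(64)  # 64x64 views, shape=(1, 20)
    rays = camera.camera_rays
    assert rays.shape == (1, 20 * 64 * 64, 2, 3)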
| 304
| 0
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        """Initialize the Fenwick tree with arr in O(size)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        """Get the normal array from the Fenwick tree in O(size)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add value to index in O(log size)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the value at index in O(log size)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Prefix sum of all elements in [0, right) in O(log size)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of all elements in [left, right) in O(log size)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of the element at index in O(log size)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index i with prefix(i + 1) <= value, or -1 if none, in O(log size)."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
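

# Usage sketch (not in the original): prefix/query/add run in O(log n) after
# the O(n) construction in init().
def _fenwick_demo() -> None:
    f = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert f.prefix(3) == 1 + 2 + 3    # sum of arr[0:3]
    assert f.query(1, 4) == 2 + 3 + 4  # sum of arr[1:4]
    assert f.rank_query(10) == 3       # largest i with prefix(i + 1) <= 10
    f.add(0, 9)                        # arr[0] += 9
    assert f.get(0) == 10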
if __name__ == "__main__":
import doctest
doctest.testmod()
| 193
|
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text: each code point p becomes c = (p + k) * k for a random k."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt by inverting encrypt: p = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 304
| 0
|
'''simple docstring'''
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Implement sin using its Taylor series expansion."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
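

# Quick check against the standard library (not in the original): the loop
# above accumulates sin(x) = x - x^3/3! + x^5/5! - ..., so the result should
# track math.sin for simplified angles.
def _sin_demo() -> None:
    from math import isclose, sin as math_sin

    assert isclose(sin(30.0), 0.5, abs_tol=1e-9)
    assert isclose(sin(-45.0), math_sin(radians(-45.0)), abs_tol=1e-9)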
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 75
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 304
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : int , _lowercase : Union[str, Any] , _lowercase : Any=13 , _lowercase : Union[str, Any]=7 , _lowercase : str=True , _lowercase : Dict=True , _lowercase : List[Any]=True , _lowercase : List[Any]=True , _lowercase : Union[str, Any]=True , _lowercase : Optional[Any]=False , _lowercase : Tuple=False , _lowercase : int=False , _lowercase : Optional[Any]=2 , _lowercase : Optional[Any]=99 , _lowercase : Optional[Any]=0 , _lowercase : Union[str, Any]=32 , _lowercase : Union[str, Any]=5 , _lowercase : Tuple=4 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]=0.1 , _lowercase : Union[str, Any]=5_12 , _lowercase : int=12 , _lowercase : List[str]=2 , _lowercase : Tuple=0.02 , _lowercase : List[str]=3 , _lowercase : Union[str, Any]=4 , _lowercase : List[Any]="last" , _lowercase : str=None , _lowercase : Any=None , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_lengths
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = gelu_activation
__UpperCAmelCase = sinusoidal_embeddings
__UpperCAmelCase = causal
__UpperCAmelCase = asm
__UpperCAmelCase = n_langs
__UpperCAmelCase = vocab_size
__UpperCAmelCase = n_special
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = summary_type
__UpperCAmelCase = use_proj
__UpperCAmelCase = scope
def a ( self : Optional[int] ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_input_lengths:
__UpperCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a ( self : Any ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def a ( self : List[Any] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : str , _lowercase : Dict , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , ):
__UpperCAmelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
__UpperCAmelCase = model(_A , lengths=_A , langs=_A )
__UpperCAmelCase = model(_A , langs=_A )
__UpperCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Optional[int] , _lowercase : List[Any] , _lowercase : int , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Optional[int] , _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : str , _lowercase : Any , ):
__UpperCAmelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
__UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : Optional[int] , _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Any , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : Tuple , ):
__UpperCAmelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
__UpperCAmelCase = model(_A )
__UpperCAmelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self : Dict , _lowercase : Tuple , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Dict , ):
__UpperCAmelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
__UpperCAmelCase = model(_A )
__UpperCAmelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
__UpperCAmelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
        (total_loss,) = result_with_labels.to_tuple()
__UpperCAmelCase = model(_A , start_positions=_A , end_positions=_A )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def a ( self : List[str] , _lowercase : List[str] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : List[Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : Tuple , _lowercase : Optional[int] , ):
__UpperCAmelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
__UpperCAmelCase = model(_A )
__UpperCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self : Tuple , _lowercase : List[str] , _lowercase : Tuple , _lowercase : str , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : str , _lowercase : List[Any] , _lowercase : int , ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
__UpperCAmelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self : Optional[int] , _lowercase : Optional[int] , _lowercase : List[Any] , _lowercase : int , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : Dict , ):
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
__UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Optional[Any] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
a__ : Any = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def a ( self : Any , _lowercase : int , _lowercase : Dict , _lowercase : List[str] , _lowercase : int , _lowercase : Dict ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a ( self : str , _lowercase : str , _lowercase : Optional[int] , _lowercase : Dict=False ):
__UpperCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
__UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def a ( self : List[Any] ):
__UpperCAmelCase = FlaubertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def a ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def a ( self : Optional[int] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def a ( self : Tuple ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def a ( self : str ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def a ( self : Tuple ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def a ( self : int ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def a ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def a ( self : Dict ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__UpperCAmelCase = True
__UpperCAmelCase = model_class(config=_A )
__UpperCAmelCase = self._prepare_for_class(_A , _A )
__UpperCAmelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
__UpperCAmelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def a ( self : str ):
__UpperCAmelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
__UpperCAmelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
__UpperCAmelCase = model(_A )[0]
__UpperCAmelCase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _A )
__UpperCAmelCase = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1E-4 ) )
| 332
|
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 9_6 for elem in plain]
def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 9_6) for elem in encoded)
def main() -> None:
    encoded = encode(input('''-> ''').strip().lower())
    print('''Encoded: ''', encoded)
    print('''Decoded:''', decode(encoded))
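# Minimal sanity check of the a=1 ... z=26 mapping (illustration only):
assert encode("abc") == [1, 2, 3]
assert decode([1, 2, 3]) == "abc"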
if __name__ == "__main__":
main()
| 304
| 0
|
def solution(lowerCamelCase_ : int = 1000):
'''simple docstring'''
return sum(e for e in range(3 ,lowerCamelCase_) if e % 3 == 0 or e % 5 == 0)
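# The same sum has an O(1) closed form via inclusion-exclusion over
# arithmetic series; a sketch for illustration (function name is hypothetical):
def solution_closed_form(limit: int = 1000) -> int:
    def tri(n: int) -> int:  # 1 + 2 + ... + n
        return n * (n + 1) // 2
    m = limit - 1
    return 3 * tri(m // 3) + 5 * tri(m // 5) - 15 * tri(m // 15)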
if __name__ == "__main__":
print(f"""{solution() = }""")
| 129
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 304
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="""language-modeling""", metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = """text"""
@property
def snake_case_ ( self: List[str] ):
'''simple docstring'''
return {self.text_column: "text"}
| 310
|
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    # bit_count is the number of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be non-negative''')
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert the bit strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is recursive; base cases are bit_count == 0 and bit_count == 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # length of the sequence (1 << n is equivalent to 2**n)
    # the recursive call generates the sequence for bit_count - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # prepend 0 to the first half of the smaller sequence
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # prepend 1 to the second half ... traverse the smaller sequence in reverse
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
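# For reference (illustration only, unused above): the same n-bit Gray code
# follows from the standard bitwise identity g(i) = i ^ (i >> 1).
def gray_code_iterative(bit_count: int) -> list[int]:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]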
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304
| 0
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__snake_case = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 13_10_72,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
}
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
    return torch.atan2(UpperCamelCase_ , UpperCamelCase_ ) / math.pi * 2
def _lowercase ( UpperCamelCase_ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = torch.sin(t * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE__ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(UpperCamelCase_ , UpperCamelCase_ )
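# Pure-math note on the schedule above (illustration): with sigma = sin(t*pi/2)**2
# and alpha = sqrt(1 - sigma**2), alpha**2 + sigma**2 == 1, so the atan2-based
# helper maps the (alpha, sigma) pair back to a timestep in [0, 1].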
class lowercase__ ( _UpperCAmelCase ):
pass
class lowercase__ ( nn.Module ):
def __init__( self : List[str] , UpperCAmelCase_ : Dict ):
super().__init__()
SCREAMING_SNAKE_CASE__ = DiffusionAttnUnetaD(_A , n_attn_layers=4 )
SCREAMING_SNAKE_CASE__ = deepcopy(self.diffusion )
SCREAMING_SNAKE_CASE__ = torch.quasirandom.SobolEngine(1 , scramble=_A )
def _lowercase ( UpperCamelCase_ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = MODELS_MAP[model_name]['''url''']
os.system(F'wget {url} ./' )
return F'./{model_name}.ckpt'
__snake_case = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
__snake_case = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
__snake_case = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
__snake_case = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
__snake_case = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
__snake_case = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def _lowercase ( UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
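# Illustrative inputs/outputs for the helper above (hypothetical key names):
#   convert_resconv_naming("skip")   -> "conv_skip"
#   convert_resconv_naming("main.0") -> "conv_1"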
def _lowercase ( UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(UpperCamelCase_ ) and not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return name.replace(UpperCamelCase_ , UpperCamelCase_ )
elif name.startswith(UpperCamelCase_ ):
return [name.replace(UpperCamelCase_ , UpperCamelCase_ ) for v in value]
raise ValueError(F'Attn error with {name}' )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_=13 ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
SCREAMING_SNAKE_CASE__ = 0
if string.startswith('net.3.' ):
depth += 1
SCREAMING_SNAKE_CASE__ = string[6:]
elif string.startswith('net.' ):
SCREAMING_SNAKE_CASE__ = string[4:]
while string.startswith('main.7.' ):
depth += 1
SCREAMING_SNAKE_CASE__ = string[7:]
if string.startswith('main.' ):
SCREAMING_SNAKE_CASE__ = string[5:]
# mid block
if string[:2].isdigit():
SCREAMING_SNAKE_CASE__ = string[:2]
SCREAMING_SNAKE_CASE__ = string[2:]
else:
SCREAMING_SNAKE_CASE__ = string[0]
SCREAMING_SNAKE_CASE__ = string[1:]
if depth == max_depth:
SCREAMING_SNAKE_CASE__ = MID_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE__ = '''mid_block'''
elif depth > 0 and int(UpperCamelCase_ ) < 7:
SCREAMING_SNAKE_CASE__ = DOWN_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE__ = F'down_blocks.{depth}'
elif depth > 0 and int(UpperCamelCase_ ) > 7:
SCREAMING_SNAKE_CASE__ = UP_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE__ = F'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
SCREAMING_SNAKE_CASE__ = DEPTH_0_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE__ = F'up_blocks.{max_depth - 1}' if int(UpperCamelCase_ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('.' ):
raise ValueError(F'Naming error with {input_string} and string_left: {string_left}.' )
SCREAMING_SNAKE_CASE__ = string_left[1:]
if "resnets" in new_layer:
SCREAMING_SNAKE_CASE__ = convert_resconv_naming(UpperCamelCase_ )
elif "attentions" in new_layer:
SCREAMING_SNAKE_CASE__ = convert_attn_naming(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = new_string_left
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
SCREAMING_SNAKE_CASE__ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def _lowercase ( UpperCamelCase_ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
SCREAMING_SNAKE_CASE__ = rename(UpperCamelCase_ )
# check if we need to transform from Conv => Linear for attention
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = transform_conv_attns(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
SCREAMING_SNAKE_CASE__ = v
return new_state_dict
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
'''simple docstring'''
if len(UpperCamelCase_ ) == 1:
if len(v.shape ) == 3:
# weight
SCREAMING_SNAKE_CASE__ = v[:, :, 0]
else:
# bias
SCREAMING_SNAKE_CASE__ = v
else:
# qkv matrices
SCREAMING_SNAKE_CASE__ = v.shape[0]
SCREAMING_SNAKE_CASE__ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
SCREAMING_SNAKE_CASE__ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
SCREAMING_SNAKE_CASE__ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
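# Shape note for the qkv branch above (assumed layout): a fused Conv1d weight
# of shape (3 * hidden, hidden, 1) is split into three (hidden, hidden) Linear
# weights by slicing along dim 0 and dropping the trailing kernel axis.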
def _lowercase ( UpperCamelCase_ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
SCREAMING_SNAKE_CASE__ = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
SCREAMING_SNAKE_CASE__ = download(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = MODELS_MAP[model_name]['''sample_rate''']
SCREAMING_SNAKE_CASE__ = MODELS_MAP[model_name]['''sample_size''']
SCREAMING_SNAKE_CASE__ = Object()
SCREAMING_SNAKE_CASE__ = sample_size
SCREAMING_SNAKE_CASE__ = sample_rate
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = UNetaDModel(sample_size=UpperCamelCase_ , sample_rate=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = diffusers_model.state_dict()
SCREAMING_SNAKE_CASE__ = DiffusionUncond(UpperCamelCase_ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=UpperCamelCase_ )['state_dict'] )
SCREAMING_SNAKE_CASE__ = orig_model.diffusion_ema.eval()
SCREAMING_SNAKE_CASE__ = orig_model.state_dict()
SCREAMING_SNAKE_CASE__ = rename_orig_weights(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
SCREAMING_SNAKE_CASE__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(UpperCamelCase_ ) == 0, F'Problem with {renamed_minus_diffusers}'
assert all(k.endswith('kernel' ) for k in list(UpperCamelCase_ ) ), F'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
SCREAMING_SNAKE_CASE__ = value.squeeze()
SCREAMING_SNAKE_CASE__ = value
diffusers_model.load_state_dict(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = 100
SCREAMING_SNAKE_CASE__ = 33
SCREAMING_SNAKE_CASE__ = IPNDMScheduler(num_train_timesteps=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = torch.manual_seed(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = torch.randn([1, 2, config.sample_size] , generator=UpperCamelCase_ ).to(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = torch.linspace(1 , 0 , steps + 1 , device=UpperCamelCase_ )[:-1]
SCREAMING_SNAKE_CASE__ = get_crash_schedule(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = DanceDiffusionPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE__ = pipe(num_inference_steps=UpperCamelCase_ , generator=UpperCamelCase_ ).audios
SCREAMING_SNAKE_CASE__ = sampling.iplms_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , {} )
SCREAMING_SNAKE_CASE__ = generated.clamp(-1 , 1 )
SCREAMING_SNAKE_CASE__ = (generated - audio).abs().sum()
SCREAMING_SNAKE_CASE__ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , UpperCamelCase_ )
print('Diff max' , UpperCamelCase_ )
assert diff_max < 1e-3, F'Diff max: {diff_max} is too much :-/'
print(F'Conversion for {model_name} successful!' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
__snake_case = parser.parse_args()
main(args)
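# Example invocation (hypothetical file name and paths):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers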
| 176
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_UpperCamelCase : Any = logging.getLogger(__name__)
class snake_case__ ( UpperCamelCase):
a_ = "masked_bert"
def __init__( self : str , _A : Dict=3_05_22 , _A : Dict=7_68 , _A : Union[str, Any]=12 , _A : str=12 , _A : str=30_72 , _A : Dict="gelu" , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : Any=5_12 , _A : Union[str, Any]=2 , _A : Union[str, Any]=0.02 , _A : int=1e-12 , _A : Any=0 , _A : Any="topK" , _A : List[str]="constant" , _A : Dict=0.0 , **_A : int , ) -> Union[str, Any]:
super().__init__(pad_token_id=_A , **_A )
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : Optional[int] = pruning_method
UpperCAmelCase_ : Optional[int] = mask_init
UpperCAmelCase_ : List[Any] = mask_scale
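# Illustrative instantiation (a sketch; the public name MaskedBertConfig and
# the chosen values are assumptions based on the movement-pruning examples):
# config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)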
| 304
| 0
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
UpperCAmelCase_ = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase__ ( cls ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] = TOKEN
HfFolder.save_token(_A )
@classmethod
def UpperCamelCase__ ( cls ) -> Tuple:
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
UpperCamelCase__ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''', use_auth_token=self._token )
UpperCamelCase__ : Any = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax" )
UpperCamelCase__ : int = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1E-3, msg=f"{key} not identical" )
# Reset repo
delete_repo(token=self._token, repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A, repo_id='''test-model-flax''', push_to_hub=_A, use_auth_token=self._token )
UpperCamelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax" )
UpperCamelCase__ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1E-3, msg=f"{key} not identical" )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
UpperCamelCase__ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''', use_auth_token=self._token )
UpperCamelCase__ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCamelCase__ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1E-3, msg=f"{key} not identical" )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A, repo_id='''valid_org/test-model-flax-org''', push_to_hub=_A, use_auth_token=self._token )
UpperCamelCase__ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCamelCase__ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1E-3, msg=f"{key} not identical" )
def check_models_equal(model_1, model_2) -> bool:
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
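# Note: the 1e-4 threshold above tolerates small numerical noise between the
# two parameter trees; an exact comparison would use np.array_equal per leaf.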
@require_flax
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCamelCase__ : Any = FlaxBertModel(_A )
UpperCamelCase__ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A, _A ) )
with self.assertRaises(_A ):
UpperCamelCase__ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCamelCase__ : List[Any] = FlaxBertModel.from_pretrained(_A, subfolder=_A )
self.assertTrue(check_models_equal(_A, _A ) )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCamelCase__ : Tuple = FlaxBertModel(_A )
UpperCamelCase__ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A, _A ), max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCamelCase__ : str = FlaxBertModel.from_pretrained(_A )
UpperCamelCase__ : Dict = FlaxBertModel.from_pretrained(_A, subfolder=_A )
self.assertTrue(check_models_equal(_A, _A ) )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : int = '''bert'''
UpperCamelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCamelCase__ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCamelCase__ : int = FlaxBertModel.from_pretrained(_A, subfolder=_A )
self.assertIsNotNone(_A )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = '''bert'''
UpperCamelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCamelCase__ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCamelCase__ : List[Any] = FlaxBertModel.from_pretrained(_A, subfolder=_A )
self.assertIsNotNone(_A )
| 201
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = StableDiffusionDiffEditPipeline
a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
a_ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ = frozenset([])
def A ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : str , _A : List[str] , _A : Any=0 ) -> str:
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Any = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Dict = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any:
UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : List[str] ) -> Optional[Any]:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Any = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase_ : str = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0]
UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max()
self.assertLess(_A , 1e-4 )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : Optional[Any] = '''cpu'''
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
UpperCAmelCase_ : int = pipe.generate_mask(**_A )
UpperCAmelCase_ : Tuple = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase_ : List[Any] = np.array([0] * 9 )
UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A ( self : str ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = '''cpu'''
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : int = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
def A ( self : Tuple ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : Any = '''cpu'''
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A )
UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : Optional[Any] ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A ( cls : Dict ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
UpperCAmelCase_ : Any = raw_image
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Tuple = '''a bowl of pears'''
UpperCAmelCase_ : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[str] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
UpperCAmelCase_ : Any = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Dict = '''a bowl of pears'''
UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[Any] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
UpperCAmelCase_ : Dict = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : Tuple = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 304
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Tuple = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
snake_case_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ ( UpperCamelCase):
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase_ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_A , '''num_heads''' ) )
class snake_case__ :
def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]:
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Tuple = patch_sizes
UpperCAmelCase_ : int = patch_stride
UpperCAmelCase_ : Any = patch_padding
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Tuple = stride_kv
UpperCAmelCase_ : Optional[Any] = depth
UpperCAmelCase_ : Dict = cls_token
UpperCAmelCase_ : Dict = attention_drop_rate
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ) -> int:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def A ( self : Dict , _A : List[Any] , _A : Tuple , _A : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = CvtModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Tuple = model(_A )
UpperCAmelCase_ : List[str] = (self.image_size, self.image_size)
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase_ : int = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
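# Added note (command is illustrative): the @slow tests above are skipped by
# default and only run when slow tests are explicitly enabled, e.g.
#
#     RUN_SLOW=1 pytest tests/models/cvt/test_modeling_cvt.py -k integration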
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter dual list with zeros: missing higher-order coefficients are zero.
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Convolve the dual coefficient lists; the +1 offset accounts for the
        # epsilon^(i+1) * epsilon^(j+1) cross terms.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Evaluate the order-th derivative of func at position via dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    # Seed a dual number with a single first-order epsilon coefficient.
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    # The k-th dual coefficient is f^(k)(position) / k!, so scale back by k!.
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()


def f(y):
    return y**2 * y**4


print(differentiate(f, 9, 2))
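# Added sanity check (worked example): for f(y) = y**6 the second derivative is
# 30 * y**4, so the call above should print 30 * 9**4 = 196830. First derivatives
# work the same way, e.g.:
#
#     differentiate(lambda y: y**2, 4, 1)  # -> 8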
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
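# Hypothetical usage sketch (added; not part of the original module): the template
# maps whichever column holds raw text onto the canonical "text" column.
#
#     task = LanguageModeling(text_column="content")
#     task.column_mapping  # -> {"content": "text"}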
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedily look for the longest vocabulary entry starting at `start`.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
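# Illustrative example (added; the toy vocab is hypothetical): tokenize() above is a
# greedy longest-match-first segmenter, so longer vocabulary entries win:
#
#     wp = WordpieceTokenizer(vocab={"机器", "学习", "机", "器"}, unk_token="<unk>")
#     wp.tokenize("机器学习")  # -> ["机器", "学习"]
#     wp.tokenize("学机器")    # -> ["<unk>", "机器"]  ("学" alone is not in the vocab)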
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the literal space and newline characters onto the special tokens.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: segment with jieba, then wordpiece each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and bos/eos markers."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the on-disk names of the space and newline tokens before writing.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
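# Added note: with build_inputs_with_special_tokens() above, a single sequence of
# n token ids is encoded as [bos] + ids, so get_special_tokens_mask() for it is
#
#     [1] + [0] * n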
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict:
with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f:
UpperCAmelCase_ : Union[str, Any] = json.load(A )
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = []
for key, info in class_info.items():
UpperCAmelCase_ : Tuple = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(A ) )
UpperCAmelCase_ : Any = thing_ids
UpperCAmelCase_ : Union[str, Any] = class_names
return metadata
class snake_case__ ( unittest.TestCase):
def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Optional[int] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
UpperCAmelCase_ : str = class_info_file
UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
UpperCAmelCase_ : Tuple = num_text
UpperCAmelCase_ : Union[str, Any] = repo_path
# for the post_process_functions
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Dict = 10
UpperCAmelCase_ : int = 10
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
UpperCAmelCase_ : str = ignore_index
def A ( self : Dict ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]:
if not batched:
UpperCAmelCase_ : Any = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ : int = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
UpperCAmelCase_ : str = self.size['''shortest_edge''']
else:
UpperCAmelCase_ : Dict = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ : int = max(_A , key=lambda _A : item[0] )[0]
UpperCAmelCase_ : List[str] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
def A ( self : Tuple ) -> str:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ = image_processing_class
def A ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )
@property
def A ( self : Any ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : int , _A : Any=False , _A : List[Any]=False , _A : Any="np" ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
if with_segmentation_maps:
UpperCAmelCase_ : Any = num_labels
if is_instance_map:
UpperCAmelCase_ : Any = list(range(_A ) ) * 2
UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
UpperCAmelCase_ : Dict = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
return inputs
def A ( self : int ) -> str:
pass
def A ( self : Tuple ) -> Union[str, Any]:
def common(_A : Optional[int]=False , _A : str=None ):
UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )
UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
UpperCAmelCase_ : int = inputs['''pixel_values''']
UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_A )
common(is_instance_map=_A , segmentation_type='''pil''' )
common(is_instance_map=_A , segmentation_type='''pil''' )
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
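    # Added note: binary_mask_to_rle flattens the mask and returns (start, length)
    # pairs for the runs of 1s, with 1-indexed starts. Row 0 is background up to
    # column 20, so the first foreground run starts at pixel 21 and covers the 30
    # ones of row 0 plus the 15 leading ones of row 1, i.e. 45 pixels.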
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
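# Minimal usage sketch (added; illustrative only): the dataset above is generated
# so that a model initialized at a=0, b=0 should learn a≈2, b≈3.
#
#     dataset = RegressionDataset(length=64, seed=42)
#     model = RegressionModel()
#     batch = dataset[0]
#     pred = model(torch.tensor(batch["x"]))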
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
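# Added example (illustrative): the regex splits on lowercase->uppercase boundaries
# and at the end of all-caps prefixes:
#
#     camel_case_split("TFBertForQuestionAnswering")
#     # -> ['TF', 'Bert', 'For', 'Question', 'Answering']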
def get_frameworks_table() -> "pd.DataFrame":
    """Build a dataframe of model types and the backends/processors that support them."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the model_class -> (pipeline_tag, auto_class) table in place."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Regenerate the frameworks and pipeline-tags metadata and push it to the Hub."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
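# Added usage sketch (illustrative): `attribute_map` lets the canonical config names
# proxy the Transformer-XL-specific ones:
#
#     config = TransfoXLConfig(n_layer=6)
#     config.num_hidden_layers  # -> 6, resolved through attribute_map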
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info(" %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
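# Example invocation (added; paths and model name are placeholders):
#
#     python run_language_modeling.py \
#         --model_name_or_path roberta-base \
#         --mlm \
#         --train_data_file /path/to/train.txt \
#         --do_train \
#         --output_dir /tmp/lm-finetune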
a_ : Tuple = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
a_ : Tuple = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
a_ : Dict = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
a_ : int = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
a_ : List[str] = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
a_ : Optional[int] = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
a_ : List[Any] = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
a_ : int = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
@classmethod
def A ( cls : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(_A )
@classmethod
def A ( cls : int ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def __UpperCAmelCase ( modela : Union[str, Any] , modelb : Optional[int] ) -> List[Any]:
    UpperCAmelCase_ : Optional[int] = True
    UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params )
    UpperCAmelCase_ : str = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
            UpperCAmelCase_ : int = False
    return models_are_equal
@require_flax
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(_A )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) )
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
UpperCAmelCase_ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : int = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
| 304
| 0
|
"""simple docstring"""
import string
def lowercase__ ( snake_case_ :str ):
for key in range(len(string.ascii_uppercase ) ):
__UpperCAmelCase = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCAmelCase = string.ascii_uppercase.find(snake_case_ )
__UpperCAmelCase = num - key
if num < 0:
__UpperCAmelCase = num + len(string.ascii_uppercase )
__UpperCAmelCase = translated + string.ascii_uppercase[num]
else:
__UpperCAmelCase = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
def lowercase__ ( ):
__UpperCAmelCase = input('''Encrypted message: ''' )
__UpperCAmelCase = message.upper()
decrypt(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
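# A minimal sanity check for the brute-force decryption above: every one of
# the 26 shifts is tried, so the plaintext appears at whichever key was used
# to encrypt. For the illustrative input "HFNM", the printed candidates start:
#   Decryption using Key #0: HFNM
#   Decryption using Key #1: GEML
# and so on; any uppercase ciphertext works the same way.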
| 332
|
'''simple docstring'''
_UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_UpperCamelCase : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
_UpperCamelCase : Dict = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 304
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : int =logging.get_logger(__name__)
def lowerCAmelCase__ ( lowerCamelCase_ : Any ,lowerCamelCase_ : Optional[int]=False ,lowerCamelCase_ : Union[str, Any]=False):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = '''backbone.''' if is_semantic else ''''''
lowerCAmelCase__ : List[str] = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight"""))
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias"""))
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight"""))
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias"""))
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight"""))
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias"""))
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight"""))
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias"""))
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight"""))
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias"""))
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
(f"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
(f"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
(f"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
])
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
])
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
return rename_keys
def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : Optional[Any]=False ,lowerCamelCase_ : Tuple=False):
'''simple docstring'''
for i in range(config.num_hidden_layers):
lowerCAmelCase__ : Union[str, Any] = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
lowerCAmelCase__ : Dict = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""")
lowerCAmelCase__ : str = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""")
lowerCAmelCase__ : Optional[int] = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""")
lowerCAmelCase__ : Any = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : Any = q_bias
lowerCAmelCase__ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCAmelCase__ : Dict = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""")
lowerCAmelCase__ : List[str] = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""")
lowerCAmelCase__ : Optional[Any] = gamma_a
lowerCAmelCase__ : Optional[Any] = gamma_a
def lowerCAmelCase__ ( lowerCamelCase_ : Dict ,lowerCamelCase_ : int ,lowerCamelCase_ : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = dct.pop(lowerCamelCase_)
lowerCAmelCase__ : str = val
def lowerCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase__ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase__ : Dict = Image.open(requests.get(lowerCamelCase_ ,stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def lowerCAmelCase__ ( lowerCamelCase_ : List[str] ,lowerCamelCase_ : int ,lowerCamelCase_ : Tuple=False):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = False if '''rvlcdip''' in checkpoint_url else True
lowerCAmelCase__ : List[str] = BeitConfig(use_absolute_position_embeddings=lowerCamelCase_ ,use_mask_token=lowerCamelCase_)
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowerCAmelCase__ : Union[str, Any] = 1024
lowerCAmelCase__ : List[str] = 4096
lowerCAmelCase__ : Union[str, Any] = 24
lowerCAmelCase__ : List[str] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowerCAmelCase__ : Optional[int] = 16
lowerCAmelCase__ : Optional[Any] = '''huggingface/label-files'''
lowerCAmelCase__ : str = '''rvlcdip-id2label.json'''
lowerCAmelCase__ : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase_ ,lowerCamelCase_ ,repo_type='''dataset''') ,'''r'''))
lowerCAmelCase__ : Optional[Any] = {int(lowerCamelCase_): v for k, v in idalabel.items()}
lowerCAmelCase__ : int = idalabel
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : List[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ ,map_location='''cpu''')['''model''']
lowerCAmelCase__ : str = create_rename_keys(lowerCamelCase_ ,has_lm_head=lowerCamelCase_)
for src, dest in rename_keys:
rename_key(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
read_in_q_k_v(lowerCamelCase_ ,lowerCamelCase_ ,has_lm_head=lowerCamelCase_)
# load HuggingFace model
lowerCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling(lowerCamelCase_) if has_lm_head else BeitForImageClassification(lowerCamelCase_)
model.eval()
model.load_state_dict(lowerCamelCase_)
# Check outputs on an image
lowerCAmelCase__ : Tuple = BeitImageProcessor(
size=config.image_size ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCamelCase_)
lowerCAmelCase__ : str = prepare_img()
lowerCAmelCase__ : List[Any] = image_processor(images=lowerCamelCase_ ,return_tensors='''pt''')
lowerCAmelCase__ : Tuple = encoding['''pixel_values''']
lowerCAmelCase__ : str = model(lowerCamelCase_)
lowerCAmelCase__ : List[Any] = outputs.logits
# verify logits
lowerCAmelCase__ : Dict = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCamelCase_), "Shape of logits not as expected"
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f"""Saving model to {pytorch_dump_folder_path}""")
model.save_pretrained(lowerCamelCase_)
print(f"""Saving image processor to {pytorch_dump_folder_path}""")
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
if has_lm_head:
lowerCAmelCase__ : str = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
lowerCAmelCase__ : Optional[int] = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ ,lowerCamelCase_) ,organization='''nielsr''' ,commit_message='''Add image processor''' ,use_temp_dir=lowerCamelCase_ ,)
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ ,lowerCamelCase_) ,organization='''nielsr''' ,commit_message='''Add model''' ,use_temp_dir=lowerCamelCase_ ,)
if __name__ == "__main__":
__snake_case : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
__snake_case : List[Any] =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
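# Example invocation (the script file name and output path are illustrative;
# the checkpoint URL is the default declared above):
# python convert_dit_checkpoint.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#     --pytorch_dump_folder_path ./dit-base
# Add --push_to_hub to also upload the converted weights.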
| 129
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __UpperCAmelCase ( A : List[str] , A : Any , A : Optional[int] , A : Optional[int] ) -> Optional[Any]:
if isinstance(A , A ):
UpperCAmelCase_ : Any = np.full((len(A ), sequence_length, 2) , A )
else:
UpperCAmelCase_ : int = np.full((len(A ), sequence_length) , A )
for i, tensor in enumerate(A ):
if padding_side == "right":
if isinstance(A , A ):
UpperCAmelCase_ : Tuple = tensor[:sequence_length]
else:
UpperCAmelCase_ : Dict = tensor[:sequence_length]
else:
if isinstance(A , A ):
UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase_ : int = tensor[:sequence_length]
return out_tensor.tolist()
def __UpperCAmelCase ( A : List[Any] ) -> str:
UpperCAmelCase_ : Dict = ord(A )
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A )
if cat.startswith('''P''' ):
return True
return False
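# e.g. the helper above treats "!" (cp 33) and "~" (cp 126) as punctuation via
# the ASCII ranges, while non-ASCII marks such as "¡" are caught through the
# unicodedata "P*" category check.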
@dataclass
class snake_case__ ( UpperCamelCase):
a_ = 42
a_ = True
a_ = None
a_ = None
a_ = -100
a_ = "pt"
def A ( self : List[Any] , _A : Dict ) -> Tuple:
import torch
UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels'''
UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase_ : Tuple = self.tokenizer.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1]
UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase_ : Optional[Any] = [
list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels
]
else:
UpperCAmelCase_ : Any = [
[self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels
]
UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features]
UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A )
UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features]
UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A )
UpperCAmelCase_ : Union[str, Any] = {k: torch.tensor(_A , dtype=torch.intaa ) for k, v in batch.items()}
return batch
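# Sketch of intended use, assuming the feature keys from LUKE's NER example
# (the class and variable names below are illustrative, not from this file):
# collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
# batch = collator(features)  # pads labels, ner_tags and original_entity_spans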
| 304
| 0
|
def _A ( _lowercase = 10_00 ) -> int:
"""simple docstring"""
    __UpperCamelCase , __UpperCamelCase = 1, 1
__UpperCamelCase = []
for i in range(1 , n + 1 ):
__UpperCamelCase = prev_numerator + 2 * prev_denominator
__UpperCamelCase = prev_numerator + prev_denominator
if len(str(_lowercase ) ) > len(str(_lowercase ) ):
result.append(_lowercase )
__UpperCamelCase = numerator
__UpperCamelCase = denominator
return len(_lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
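# The recurrence above generates successive convergents of sqrt(2):
# n_(k+1) = n_k + 2*d_k and d_(k+1) = n_k + d_k, starting from 3/2.
# solution() counts how many of the first 1000 expansions have a numerator
# with more digits than the denominator (Project Euler 57; expected: 153).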
| 310
|
'''simple docstring'''
import functools
def __UpperCAmelCase ( A : str , A : str ) -> int:
UpperCAmelCase_ : Optional[Any] = len(A )
UpperCAmelCase_ : List[str] = len(A )
@functools.cache
def min_distance(A : int , A : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase_ : Any = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , A ) , 1 + min_distance(A , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
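# Quick check for the memoised Levenshtein distance above: calling the
# top-level function on ("kitten", "sitting") should return 3
# (two substitutions plus one insertion).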
| 304
| 0
|
def _lowercase ( UpperCamelCase_ ) -> list:
'''simple docstring'''
def merge(UpperCamelCase_ , UpperCamelCase_ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCamelCase_ ) <= 1:
return collection
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase_ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = input("""Enter numbers separated by a comma:\n""").strip()
__snake_case = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
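# e.g. entering "5,3,8,1" at the prompt prints "1,3,5,8"; merge_sort on an
# empty list or a single element returns it unchanged (the base case above).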
| 176
|
'''simple docstring'''
def __UpperCAmelCase ( A : int = 1_0_0_0 ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 1, 1
UpperCAmelCase_ : Dict = []
for i in range(1 , n + 1 ):
UpperCAmelCase_ : Optional[int] = prev_numerator + 2 * prev_denominator
UpperCAmelCase_ : Tuple = prev_numerator + prev_denominator
if len(str(A ) ) > len(str(A ) ):
result.append(A )
UpperCAmelCase_ : Optional[Any] = numerator
UpperCAmelCase_ : Optional[int] = denominator
return len(A )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 304
| 0
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {'UserAgent': UserAgent().random}
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> dict:
UpperCamelCase__ : Tuple = script.contents[0]
UpperCamelCase__ : Any = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = f"https://www.instagram.com/{username}/"
UpperCamelCase__ : Optional[int] = self.get_json()
def UpperCamelCase__ ( self ) -> dict:
"""simple docstring"""
UpperCamelCase__ : List[Any] = requests.get(self.url, headers=_A ).text
UpperCamelCase__ : Tuple = BeautifulSoup(_A, '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ) -> str:
"""simple docstring"""
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self ) -> str:
"""simple docstring"""
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def UpperCamelCase__ ( self ) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
def UpperCamelCase__ ( self ) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def lowerCAmelCase_ ( __UpperCAmelCase: str = "github" ) -> None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
UpperCamelCase__ : List[str] = InstagramUser(__UpperCAmelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __UpperCAmelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = InstagramUser('github')
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 201
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case__ ( unittest.TestCase):
def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : Optional[Any] = do_center_crop
UpperCAmelCase_ : Optional[int] = crop_size
UpperCAmelCase_ : Tuple = do_normalize
UpperCAmelCase_ : Optional[Any] = image_mean
UpperCAmelCase_ : int = image_std
UpperCAmelCase_ : List[Any] = do_reduce_labels
def A ( self : Union[str, Any] ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] )
UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] )
return image, map
def __UpperCAmelCase ( ) -> Any:
UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] )
UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] )
UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] )
UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = BeitImageProcessor if is_vision_available() else None
def A ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self )
@property
def A ( self : List[Any] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
self.assertTrue(hasattr(_A , '''center_crop''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
def A ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , _A )
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , _A )
def A ( self : Optional[Any] ) -> Any:
pass
def A ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Optional[int] ) -> str:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def A ( self : Any ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
UpperCAmelCase_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs()
UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def A ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
| 304
| 0
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : List[str] = np.shape(UpperCAmelCase_ )
if rows != columns:
_UpperCamelCase : Tuple = (
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = np.zeros((rows, columns) )
_UpperCamelCase : int = np.zeros((rows, columns) )
for i in range(UpperCAmelCase_ ):
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(UpperCAmelCase_ ) )
if upper[j][j] == 0:
raise ArithmeticError('No LU decomposition exists' )
_UpperCamelCase : str = (table[i][j] - total) / upper[j][j]
_UpperCamelCase : str = 1
for j in range(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Optional[int] = sum(lower[i][k] * upper[k][j] for k in range(UpperCAmelCase_ ) )
_UpperCamelCase : List[str] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
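# Worked example for the Doolittle-style LU decomposition above: decomposing
# np.array([[2.0, 1.0], [4.0, 3.0]]) yields
#   lower = [[1, 0], [2, 1]] and upper = [[2, 1], [0, 1]],
# so that lower @ upper reproduces the input table.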
| 83
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class snake_case__ ( enum.Enum):
a_ = 0
a_ = 1
a_ = 2
@add_end_docstrings(UpperCamelCase)
class snake_case__ ( UpperCamelCase):
a_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]:
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase_ : Dict = None
if self.model.config.prefix is not None:
UpperCAmelCase_ : Tuple = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params )
UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params}
def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = {}
if prefix is not None:
UpperCAmelCase_ : List[Any] = prefix
if prefix:
UpperCAmelCase_ : Tuple = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
''' [None, \'hole\']''' )
UpperCAmelCase_ : Union[str, Any] = handle_long_generation
preprocess_params.update(_A )
UpperCAmelCase_ : Optional[int] = generate_kwargs
UpperCAmelCase_ : Tuple = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
UpperCAmelCase_ : List[Any] = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase_ : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase_ : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict:
return super().__call__(_A , **_A )
def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
UpperCAmelCase_ : str = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens''']
else:
UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]:
UpperCAmelCase_ : Any = model_inputs['''input_ids''']
UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Union[str, Any] = 1
else:
UpperCAmelCase_ : Optional[int] = input_ids.shape[0]
UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
UpperCAmelCase_ : Any = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0]
UpperCAmelCase_ : int = model_outputs['''input_ids''']
UpperCAmelCase_ : str = model_outputs['''prompt_text''']
UpperCAmelCase_ : Any = generated_sequence.numpy().tolist()
UpperCAmelCase_ : int = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase_ : Any = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase_ : List[str] = 0
else:
UpperCAmelCase_ : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:]
else:
UpperCAmelCase_ : Dict = text[prompt_length:]
UpperCAmelCase_ : List[str] = {'''generated_text''': all_text}
records.append(_A )
return records
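# Typical end-user entry point for this pipeline (model name illustrative):
# from transformers import pipeline
# generator = pipeline("text-generation", model="gpt2")
# generator("Hello, I'm a language model,", max_new_tokens=20)
# -> [{"generated_text": "Hello, I'm a language model, ..."}]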
| 304
| 0
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
snake_case : List[str] = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCAmelCase_ ( _snake_case : str = "dhaka" , _snake_case : int = 5 ) -> int:
'''simple docstring'''
__magic_name__ : Union[str, Any] = min(_snake_case , 50 ) # Prevent abuse!
__magic_name__ : List[Any] = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
__magic_name__ : Tuple = requests.get("https://www.google.com/search" , params=_snake_case , headers=_snake_case )
__magic_name__ : Dict = BeautifulSoup(html.text , "html.parser" )
__magic_name__ : List[str] = ''''''.join(
re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
__magic_name__ : Optional[int] = json.dumps(_snake_case )
__magic_name__ : Optional[Any] = json.loads(_snake_case )
__magic_name__ : int = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , _snake_case , )
if not matched_google_image_data:
return 0
__magic_name__ : int = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(_snake_case ) , )
__magic_name__ : Optional[Any] = re.findall(
R"(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , _snake_case , )
for index, fixed_full_res_image in enumerate(_snake_case ):
if index >= max_images:
return index
__magic_name__ : Optional[Any] = bytes(_snake_case , "ascii" ).decode(
"unicode-escape" )
__magic_name__ : Optional[int] = bytes(_snake_case , "ascii" ).decode(
"unicode-escape" )
__magic_name__ : Dict = urllib.request.build_opener()
__magic_name__ : int = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(_snake_case )
__magic_name__ : Tuple = F'''query_{query.replace(" " , "_" )}'''
if not os.path.exists(_snake_case ):
os.makedirs(_snake_case )
urllib.request.urlretrieve( # noqa: S310
_snake_case , F'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
snake_case : Tuple = download_images_from_google_query(sys.argv[1])
print(F"{image_count} images were downloaded to disk.")
except IndexError:
print("Please provide a search term.")
raise
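# Example invocation (the script file name is illustrative):
# python google_image_downloader.py "blue sky"
# downloads up to 5 images into ./query_blue_sky/ as original_size_img_N.jpg,
# since max_images defaults to 5 and the folder is derived from the query.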
| 281
|
'''simple docstring'''
from __future__ import annotations
import math
def __UpperCAmelCase ( A : int , A : int , A : bool , A : list[int] , A : float ) -> int:
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
)
def __UpperCAmelCase ( ) -> None:
UpperCAmelCase_ : List[str] = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
UpperCAmelCase_ : List[Any] = math.log(len(A ) , 2 )
print(F"Optimal value : {minimax(0 , 0 , A , A , A )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
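# Tracing main() above: the 8 leaf scores form a depth-3 tree, evaluated as
# level 2 (max): 90, 33, 65, 34423 -> level 1 (min): 33, 65 -> root (max): 65,
# so the program prints "Optimal value : 65".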
| 304
| 0
|
import math
from datetime import datetime, timedelta
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> datetime:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = year % 19
SCREAMING_SNAKE_CASE__ = year % 4
SCREAMING_SNAKE_CASE__ = year % 7
SCREAMING_SNAKE_CASE__ = math.floor(year / 1_00 )
SCREAMING_SNAKE_CASE__ = math.floor((13 + 8 * leap_day_inhibits) / 25 )
SCREAMING_SNAKE_CASE__ = leap_day_inhibits / 4
SCREAMING_SNAKE_CASE__ = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
SCREAMING_SNAKE_CASE__ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
SCREAMING_SNAKE_CASE__ = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
SCREAMING_SNAKE_CASE__ = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__UpperCamelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__UpperCamelCase , 4 , 18 )
else:
return datetime(__UpperCamelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
__lowerCamelCase : List[str] = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
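# Reference values to sanity-check the computus above: Gauss's algorithm
# gives 2000-04-23 and 2023-04-09, matching the actual Easter Sundays.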
| 219
|
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( A : list , A : int , A : int , A : int ) -> list:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
UpperCAmelCase_ : List[Any] = result + left + right
return input_list
def __UpperCAmelCase ( A : list ) -> list:
if len(A ) <= 1:
return input_list
UpperCAmelCase_ : List[str] = list(A )
# iteration for two-way merging
UpperCAmelCase_ : Tuple = 2
while p <= len(A ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(A ) , A ):
UpperCAmelCase_ : Union[str, Any] = i
UpperCAmelCase_ : int = i + p - 1
UpperCAmelCase_ : Any = (low + high + 1) // 2
UpperCAmelCase_ : Union[str, Any] = merge(A , A , A , A )
# final merge of last two parts
if p * 2 >= len(A ):
UpperCAmelCase_ : str = i
UpperCAmelCase_ : Tuple = merge(A , 0 , A , len(A ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
_UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
_UpperCamelCase : List[str] = []
else:
_UpperCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
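# e.g. iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) returns [1, 2, 5, 7, 7, 8, 9]:
# the run width p doubles each pass (2, 4, ...) until the final merge of the
# last two runs covers the whole list.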
| 304
| 0
|
'''simple docstring'''
import warnings
warnings.warn(
'''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 318
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
a_ = 42 # [batch_size x 3]
a_ = 42 # [batch_size x 3]
a_ = 42 # [batch_size x 3]
a_ = 42 # [batch_size x 3]
a_ = 42
a_ = 42
a_ = 42
a_ = 42
a_ = 42
def A ( self : Tuple ) -> Optional[int]:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def A ( self : List[Any] ) -> Union[str, Any]:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def A ( self : Any ) -> Optional[Any]:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def A ( self : Optional[int] ) -> torch.Tensor:
UpperCAmelCase_ : Dict = torch.arange(self.height * self.width )
UpperCAmelCase_ : int = torch.stack(
[
pixel_indices % self.width,
torch.div(_A , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def A ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ , *UpperCAmelCase_ : Union[str, Any] = self.shape
UpperCAmelCase_ : Optional[Any] = int(np.prod(_A ) )
UpperCAmelCase_ : Any = self.get_image_coords()
UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A )
UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def A ( self : Optional[int] , _A : torch.Tensor ) -> torch.Tensor:
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 )
UpperCAmelCase_ : Union[str, Any] = self.resolution()
UpperCAmelCase_ : int = self.fov()
UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 )
UpperCAmelCase_ : List[Any] = (
self.z.view(_A , 1 , 3 )
+ self.x.view(_A , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A )
UpperCAmelCase_ : Union[str, Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_A , *_A , 2 , 3 )
def A ( self : Tuple , _A : int , _A : int ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , )
def __UpperCAmelCase ( A : int ) -> DifferentiableProjectiveCamera:
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : str = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : str = np.array([np.sin(A ), np.cos(A ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : Optional[int] = -z * 4
UpperCAmelCase_ : Optional[int] = np.array([np.cos(A ), -np.sin(A ), 0.0] )
UpperCAmelCase_ : List[Any] = np.cross(A , A )
origins.append(A )
xs.append(A )
ys.append(A )
zs.append(A )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A , axis=0 ) ).float() , width=A , height=A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A )) , )
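# Note: the loop above places 20 cameras on a ring (theta sweeping 0..2*pi),
# each at distance 4 from the origin and looking inward along z, with a
# 0.7 rad field of view; this is the turntable setup typically used for
# rendering 3D outputs from multiple views.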
| 304
| 0
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
def UpperCamelCase ( self ):
torch.manual_seed(0 )
        A__ = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
A__ = DDIMScheduler()
A__ = {'''unet''': unet, '''scheduler''': scheduler}
return components
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=0 ):
if str(_A ).startswith('''mps''' ):
A__ = torch.manual_seed(_A )
else:
A__ = torch.Generator(device=_A ).manual_seed(_A )
A__ = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self ):
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
A__ = self.get_dummy_inputs(_A )
A__ = pipe(**_A ).images
A__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 32, 32, 3) )
A__ = np.array(
[1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04] )
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def UpperCamelCase ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCamelCase ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCamelCase ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = '''google/ddpm-cifar10-32'''
        A__ = UNet2DModel.from_pretrained(_A )
A__ = DDIMScheduler()
A__ = DDIMPipeline(unet=_A,scheduler=_A )
ddim.to(_A )
ddim.set_progress_bar_config(disable=_A )
A__ = torch.manual_seed(0 )
A__ = ddim(generator=_A,eta=0.0,output_type='''numpy''' ).images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
A__ = '''google/ddpm-ema-bedroom-256'''
        A__ = UNet2DModel.from_pretrained(_A )
A__ = DDIMScheduler.from_pretrained(_A )
A__ = DDIMPipeline(unet=_A,scheduler=_A )
ddpm.to(_A )
ddpm.set_progress_bar_config(disable=_A )
A__ = torch.manual_seed(0 )
A__ = ddpm(generator=_A,output_type='''numpy''' ).images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A__ = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 193
|
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text`` into a (cipher, key) pair using random one-time keys."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the plaintext from a (cipher, key) pair."""
        plain = []
        for i in range(len(cipher)):
            plain.append(chr(int((cipher[i] - (key[i]) ** 2) / key[i])))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
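    # Round-trip sanity check (illustrative): decryption must invert encryption,
    # since ((i + k) * k - k**2) / k == i for any key k.
    assert Onepad().decrypt(c, k) == "Hello"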
| 304
| 0
|
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
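# num.txt is expected to hold one large integer per line; solution() returns the
# first ten digits of their sum as a string.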
if __name__ == "__main__":
print(solution())
| 75
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Optional[Any] ) -> Any:
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def A ( self : Any ) -> str:
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_A ) , 10_00 )
def A ( self : Optional[int] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def A ( self : Optional[Any] ) -> List[Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(_A )
UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def A ( self : Tuple , _A : Dict=15 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A )
# Simple input
UpperCAmelCase_ : Optional[int] = '''This is a simple input'''
UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : Dict = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
def A ( self : Union[str, Any] ) -> int:
pass
def A ( self : int ) -> Any:
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def A ( self : List[str] ) -> Optional[int]:
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def A ( self : str ) -> str:
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def A ( self : List[Any] ) -> str:
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def A ( self : List[str] ) -> Optional[int]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
        sequence = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(sequence)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def A ( self : int ) -> Optional[Any]:
# fmt: off
UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ : Optional[Any] = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
| 304
| 0
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, w, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, w) + b
    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
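# Illustrative construction (a sketch; the vocabulary size, dimensions, and cutoffs are assumptions):
# adaptive_softmax = TFAdaptiveSoftmaxMask(vocab_size=32000, d_embed=512, d_proj=512, cutoffs=[2000, 10000])
# hidden = tf.random.normal((16, 8, 512))                        # (seq_len, batch, d_proj)
# target = tf.random.uniform((16, 8), maxval=32000, dtype=tf.int64)
# log_probs = adaptive_softmax(hidden, target, training=True)    # also records the loss via add_loss()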
| 332
|
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
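# Example: encode("hello") -> [8, 5, 12, 12, 15] and decode([8, 5, 12, 12, 15]) -> "hello".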
| 304
| 0
|
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
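# Example: softmax(np.array([1.0, 2.0, 3.0])) -> approximately [0.0900, 0.2447, 0.6652].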
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 129
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 304
| 0
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class Speech2TextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self: Dict ):
'''simple docstring'''
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],'<s>' )
self.assertEqual(vocab_keys[1],'<pad>' )
self.assertEqual(vocab_keys[-1],'j' )
self.assertEqual(len(_A ),1001 )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size,1001 )
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
__UpperCamelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(_A,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ),[289, 50, 14, 174, 386],)
__UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_A,[SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'],)
__UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(_A,[12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A,[SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'],)
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
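        # fmt: off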
__UpperCamelCase = {'''input_ids''': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A,model_name='facebook/s2t-small-mustc-en-de-st',revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad',)
@require_sentencepiece
class __lowerCamelCase (unittest.TestCase ):
_lowercase = """valhalla/s2t_mustc_multilinguial_medium"""
_lowercase = """C'est trop cool"""
_lowercase = """Esto es genial"""
@classmethod
    def setUpClass(cls):
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id['pt'],4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'],6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'],9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'],11 )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size,1_0000 )
def snake_case_ ( self: int ):
'''simple docstring'''
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
def snake_case_ ( self: int ):
'''simple docstring'''
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
def snake_case_ ( self: Any ):
'''simple docstring'''
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 310
|
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    # bit_count represents the number of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # prepend 0 to the first half of the smaller sequence
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # prepend 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
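# Example: gray_code(2) -> [0, 1, 3, 2], i.e. the bit strings "00", "01", "11", "10".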
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304
| 0
|
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
__snake_case = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
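# Illustrative invocation (a sketch; the script name and all paths are hypothetical):
# python convert_hifigan.py \
#     --checkpoint_path generator.ckpt \
#     --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan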
| 176
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_UpperCamelCase : Any = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
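# Illustrative usage (a sketch; the pruning settings shown are example values):
# config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
# config.save_pretrained("./masked_bert")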
| 304
| 0
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
        UpperCamelCase__ : str = UNet2DConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=_A, )
UpperCamelCase__ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=_A, set_alpha_to_one=_A, )
UpperCamelCase__ : Optional[int] = DDIMInverseScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=_A, set_alpha_to_zero=_A, )
torch.manual_seed(0 )
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
UpperCamelCase__ : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='''gelu''', projection_dim=512, )
UpperCamelCase__ : Union[str, Any] = CLIPTextModel(_A )
UpperCamelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=0 ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = floats_tensor((1, 16, 16), rng=random.Random(_A ) ).to(_A )
UpperCamelCase__ : Dict = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCamelCase__ : Any = torch.manual_seed(_A )
else:
UpperCamelCase__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCamelCase__ : str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=0 ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 32, 32), rng=random.Random(_A ) ).to(_A )
UpperCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
        UpperCamelCase__ : int = Image.fromarray(np.uint8(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCamelCase__ : Dict = torch.manual_seed(_A )
else:
UpperCamelCase__ : Any = torch.Generator(device=_A ).manual_seed(_A )
UpperCamelCase__ : Optional[Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=0 ) -> Any:
"""simple docstring"""
UpperCamelCase__ : str = floats_tensor((1, 3, 32, 32), rng=random.Random(_A ) ).to(_A )
UpperCamelCase__ : List[str] = image.cpu().permute(0, 2, 3, 1 )[0]
        UpperCamelCase__ : Optional[int] = Image.fromarray(np.uint8(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCamelCase__ : Optional[int] = torch.manual_seed(_A )
else:
UpperCamelCase__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCamelCase__ : Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
if not hasattr(self.pipeline_class, '''_optional_components''' ):
return
UpperCamelCase__ : str = self.get_dummy_components()
UpperCamelCase__ : Any = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_A, _A, _A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCamelCase__ : List[str] = self.get_dummy_inputs(_A )
UpperCamelCase__ : str = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCamelCase__ : Any = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A, _A ) is None, f"`{optional_component}` did not stay set to None after loading.", )
UpperCamelCase__ : Tuple = self.get_dummy_inputs(_A )
UpperCamelCase__ : List[Any] = pipe_loaded(**_A )[0]
UpperCamelCase__ : Any = np.abs(output - output_loaded ).max()
self.assertLess(_A, 1E-4 )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = '''cpu'''
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCamelCase__ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
UpperCamelCase__ : int = pipe.generate_mask(**_A )
UpperCamelCase__ : Tuple = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
UpperCamelCase__ : List[Any] = np.array([0] * 9 )
UpperCamelCase__ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = '''cpu'''
UpperCamelCase__ : str = self.get_dummy_components()
UpperCamelCase__ : str = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCamelCase__ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
UpperCamelCase__ : Optional[Any] = pipe.invert(**_A ).images
UpperCamelCase__ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
UpperCamelCase__ : int = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799], )
UpperCamelCase__ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A, 1E-3 )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Any = '''cpu'''
UpperCamelCase__ : Union[str, Any] = self.get_dummy_components()
UpperCamelCase__ : Any = {'''beta_start''': 0.0_0085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCamelCase__ : Any = DPMSolverMultistepScheduler(**_A )
UpperCamelCase__ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
UpperCamelCase__ : Union[str, Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
UpperCamelCase__ : Optional[Any] = pipe.invert(**_A ).images
UpperCamelCase__ : Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
UpperCamelCase__ : List[Any] = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799], )
UpperCamelCase__ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A, 1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))

        cls.raw_image = raw_image
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : int = torch.manual_seed(0 )
UpperCamelCase__ : str = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''', safety_checker=_A, torch_dtype=torch.float16 )
UpperCamelCase__ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCamelCase__ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCamelCase__ : Optional[Any] = '''a bowl of fruit'''
UpperCamelCase__ : Tuple = '''a bowl of pears'''
UpperCamelCase__ : Optional[int] = pipe.generate_mask(
image=self.raw_image, source_prompt=_A, target_prompt=_A, generator=_A, )
UpperCamelCase__ : List[str] = pipe.invert(
prompt=_A, image=self.raw_image, inpaint_strength=0.7, generator=_A ).latents
UpperCamelCase__ : Any = pipe(
prompt=_A, mask_image=_A, image_latents=_A, generator=_A, negative_prompt=_A, inpaint_strength=0.7, output_type='''numpy''', ).images[0]
UpperCamelCase__ : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Dict = torch.manual_seed(0 )
UpperCamelCase__ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''', safety_checker=_A, torch_dtype=torch.float16 )
UpperCamelCase__ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCamelCase__ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCamelCase__ : Optional[Any] = '''a bowl of fruit'''
UpperCamelCase__ : Dict = '''a bowl of pears'''
UpperCamelCase__ : Union[str, Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=_A, target_prompt=_A, generator=_A, )
UpperCamelCase__ : List[Any] = pipe.invert(
prompt=_A, image=self.raw_image, inpaint_strength=0.7, generator=_A, num_inference_steps=25, ).latents
UpperCamelCase__ : Dict = pipe(
prompt=_A, mask_image=_A, image_latents=_A, generator=_A, negative_prompt=_A, inpaint_strength=0.7, num_inference_steps=25, output_type='''numpy''', ).images[0]
UpperCamelCase__ : Tuple = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 201
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def A ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
        UpperCAmelCase_ : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : str , _A : List[str] , _A : Any=0 ) -> str:
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Any = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ : int = Image.fromarray(np.uint8(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Dict = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any:
UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uint8(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : List[str] ) -> Optional[Any]:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Any = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase_ : str = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0]
UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max()
self.assertLess(_A , 1e-4 )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : Optional[Any] = '''cpu'''
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
UpperCAmelCase_ : int = pipe.generate_mask(**_A )
UpperCAmelCase_ : Tuple = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase_ : List[Any] = np.array([0] * 9 )
UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A ( self : str ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = '''cpu'''
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : int = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
def A ( self : Tuple ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : Any = '''cpu'''
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A )
UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : Optional[Any] ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A ( cls : Dict ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
UpperCAmelCase_ : Any = raw_image
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Tuple = '''a bowl of pears'''
UpperCAmelCase_ : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[str] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
UpperCAmelCase_ : Any = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Dict = '''a bowl of pears'''
UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[Any] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
UpperCAmelCase_ : Dict = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : Tuple = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 304
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
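# The try/except/else above is the soft-dependency guard used across this package:
# when a backend is missing, the real classes are replaced by dummy placeholders
# that raise an informative error on use. A minimal sketch of the same idiom
# (the module and class names below are illustrative, not part of this package):
#
# try:
#     if not is_torch_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     from ...utils.dummy_pt_objects import *  # noqa: F403  (auto-generated dummies)
# else:
#     from .real_module import RealPipeline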
| 83
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ ( UpperCamelCase):
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase_ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_A , '''num_heads''' ) )
class snake_case__ :
def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]:
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Tuple = patch_sizes
UpperCAmelCase_ : int = patch_stride
UpperCAmelCase_ : Any = patch_padding
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : int = embed_dim
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Tuple = stride_kv
UpperCAmelCase_ : Optional[Any] = depth
UpperCAmelCase_ : Dict = cls_token
UpperCAmelCase_ : Dict = attention_drop_rate
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ) -> int:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def A ( self : Dict , _A : List[Any] , _A : Tuple , _A : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = CvtModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Tuple = model(_A )
UpperCAmelCase_ : List[str] = (self.image_size, self.image_size)
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase_ : int = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
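# Worked example of the floor formula above with the tester defaults (image_size=64,
# stage-1 patch_size=7, padding=2, stride=4):
#   floor((64 + 2 * 2 - 7) / 4 + 1) = floor(16.25) = 16
# i.e. a 16x16 stage-1 feature map, matching the image_size // 4 used in the
# hidden-state checks of the model tests below.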
def A ( self : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : str = CvtForImageClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : int = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Dict ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs
UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
a_ = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : Optional[int] = CvtModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def A ( self : Any ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : int ) -> List[str]:
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def A ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def A ( self : Any ) -> Optional[Any]:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def A ( self : List[Any] ) -> Any:
pass
def A ( self : int ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_A )
UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : Dict ) -> List[str]:
def check_hidden_states_output(_A : Dict , _A : str , _A : int ):
UpperCAmelCase_ : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
UpperCAmelCase_ : Any = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Dict = True
check_hidden_states_output(_A , _A , _A )
def A ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def A ( self : Optional[int] ) -> int:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def __UpperCAmelCase ( ) -> str:
UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase):
@cached_property
def A ( self : Union[str, Any] ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A ( self : str ) -> str:
UpperCAmelCase_ : str = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_A )
# verify the logits
UpperCAmelCase_ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
| 304
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = inspect.getfile(accelerate.test_utils )
__magic_name__ : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
__magic_name__ : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
__magic_name__ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__magic_name__ : Optional[Any] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__magic_name__ : Union[str, Any] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self ):
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
__magic_name__ : Optional[int] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
snake_case : Optional[Any] = Accelerator()
snake_case : str = (accelerator.state.process_index + 2, 10)
snake_case : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
snake_case : List[str] = ''
snake_case : Any = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
snake_case : List[str] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
snake_case : Tuple = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
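# For intuition: with two processes, rank 0 builds a (2, 10) tensor and rank 1 a
# (3, 10) tensor, so pad_across_processes zero-pads every rank to 3 rows -- at the
# end by default, at the front with pad_first=True. That is why the checks above
# expect num_processes + 1 rows and zeros in the padded region.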
| 281
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase)
class snake_case__ ( UpperCamelCase):
a_ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True})
a_ = Features({"text": Value("string")})
a_ = Features({})
a_ = "text"
@property
def A ( self : List[str] ) -> Dict[str, str]:
return {self.text_column: "text"}
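# A minimal usage sketch (the class and column names here are illustrative, since
# the definitions above are anonymized):
#
# template = LanguageModeling(text_column="content")
# template.column_mapping  # -> {"content": "text"}
#
# datasets can then use this mapping (e.g. in Dataset.prepare_for_task) to rename
# the raw column to the canonical "text" feature declared by the template.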
| 304
| 0
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = k_size // 2
SCREAMING_SNAKE_CASE__ = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
SCREAMING_SNAKE_CASE__ = 1 / (2 * pi * sigma) * exp(-(square(__UpperCamelCase ) + square(__UpperCamelCase )) / (2 * square(__UpperCamelCase )) )
return g
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = image.shape[0], image.shape[1]
# dst image height and width
SCREAMING_SNAKE_CASE__ = height - k_size + 1
SCREAMING_SNAKE_CASE__ = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
SCREAMING_SNAKE_CASE__ = zeros((dst_height * dst_width, k_size * k_size) )
SCREAMING_SNAKE_CASE__ = 0
for i, j in product(range(__UpperCamelCase ) , range(__UpperCamelCase ) ):
SCREAMING_SNAKE_CASE__ = ravel(image[i : i + k_size, j : j + k_size] )
SCREAMING_SNAKE_CASE__ = window
row += 1
# turn the kernel into shape(k*k, 1)
SCREAMING_SNAKE_CASE__ = gen_gaussian_kernel(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = ravel(__UpperCamelCase )
# reshape and get the dst image
SCREAMING_SNAKE_CASE__ = dot(__UpperCamelCase , __UpperCamelCase ).reshape(__UpperCamelCase , __UpperCamelCase ).astype(__UpperCamelCase )
return dst
if __name__ == "__main__":
# read original image
__lowerCamelCase : Any = imread(r'''../image_data/lena.jpg''')
# turn image in gray scale value
__lowerCamelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__lowerCamelCase : Optional[Any] = gaussian_filter(gray, 3, sigma=1)
__lowerCamelCase : List[str] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
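# A self-contained sanity check of the pipeline above on synthetic data (a sketch:
# the names follow the call sites, since the definitions are anonymized). Note the
# kernel constant is 1 / (2 * pi * sigma); the textbook 2-D Gaussian normalization
# is 1 / (2 * pi * sigma ** 2), which only changes the overall scale here.
#
# from numpy import float64, ones
# toy = ones((6, 6), dtype=float64)
# out = gaussian_filter(toy, 3, sigma=1.0)
# assert out.shape == (4, 4)  # valid convolution: each side shrinks by k_size - 1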
| 219
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict:
with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f:
UpperCAmelCase_ : Union[str, Any] = json.load(A )
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = []
for key, info in class_info.items():
UpperCAmelCase_ : Tuple = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(A ) )
UpperCAmelCase_ : Any = thing_ids
UpperCAmelCase_ : Union[str, Any] = class_names
return metadata
class snake_case__ ( unittest.TestCase):
def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Optional[int] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
UpperCAmelCase_ : str = class_info_file
UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
UpperCAmelCase_ : Tuple = num_text
UpperCAmelCase_ : Union[str, Any] = repo_path
# for the post_process_functions
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Dict = 10
UpperCAmelCase_ : int = 10
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
UpperCAmelCase_ : str = ignore_index
def A ( self : Dict ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]:
if not batched:
UpperCAmelCase_ : Any = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ : int = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
UpperCAmelCase_ : str = self.size['''shortest_edge''']
else:
UpperCAmelCase_ : Dict = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ : int = max(_A , key=lambda _A : item[0] )[0]
UpperCAmelCase_ : List[str] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
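# Worked example of the resize rule above: with shortest_edge=32 and a 30x40 (h, w)
# image, w > h, so expected_height = 32 and expected_width = int(32 * 40 / 30) = 42.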
def A ( self : Tuple ) -> str:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ = image_processing_class
def A ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )
@property
def A ( self : Any ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : int , _A : Any=False , _A : List[Any]=False , _A : Any="np" ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
if with_segmentation_maps:
UpperCAmelCase_ : Any = num_labels
if is_instance_map:
UpperCAmelCase_ : Any = list(range(_A ) ) * 2
UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
UpperCAmelCase_ : Dict = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
return inputs
def A ( self : int ) -> str:
pass
def A ( self : Tuple ) -> Union[str, Any]:
def common(_A : Optional[int]=False , _A : str=None ):
UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )
UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
UpperCAmelCase_ : int = inputs['''pixel_values''']
UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_A )
common(is_instance_map=_A , segmentation_type='''pil''' )
common(is_instance_map=_A , segmentation_type='''pil''' )
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_ : int = np.zeros((20, 50) )
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_A )
self.assertEqual(len(_A ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A ( self : Any ) -> List[Any]:
UpperCAmelCase_ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : Union[str, Any] = image_processor.post_process_semantic_segmentation(_A )
self.assertEqual(len(_A ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase_ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
UpperCAmelCase_ : Any = image_processor.post_process_semantic_segmentation(_A , target_sizes=_A )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
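# For intuition, a minimal run-length encoder matching the convention asserted in
# the binary-mask test above: counts of alternating runs over the flattened mask,
# starting with the zeros run. A sketch only -- the library helper may flatten in
# a different order.
def _toy_binary_mask_to_rle(mask):
    runs = []
    current, count = 0, 0
    for value in mask.flatten().astype(int):
        if value == current:
            count += 1
        else:
            runs.append(count)
            current, count = value, 1
    runs.append(count)
    return runs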
| 304
| 0
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lowercase_ ( _lowercase ) -> Tuple:
'''simple docstring'''
return np.maximum(0 , _lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 318
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase : Optional[int] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_UpperCamelCase : str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_UpperCamelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCamelCase : List[str] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __UpperCAmelCase ( A : Optional[int] ) -> int:
UpperCAmelCase_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A )
return [m.group(0 ) for m in matches]
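# For intuition (the name camel_case_split follows the call sites below): the
# lookarounds split on lower->upper and acronym->word boundaries, e.g.
#   camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]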
def __UpperCAmelCase ( ) -> str:
UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase_ : Optional[Any] = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
UpperCAmelCase_ : Dict = collections.defaultdict(A )
UpperCAmelCase_ : str = collections.defaultdict(A )
UpperCAmelCase_ : int = collections.defaultdict(A )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(A ):
UpperCAmelCase_ : int = None
if _re_tf_models.match(A ) is not None:
UpperCAmelCase_ : Optional[Any] = tf_models
UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0]
elif _re_flax_models.match(A ) is not None:
UpperCAmelCase_ : int = flax_models
UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0]
elif _re_pt_models.match(A ) is not None:
UpperCAmelCase_ : Union[str, Any] = pt_models
UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0]
if lookup_dict is not None:
while len(A ) > 0:
if attr_name in model_prefix_to_model_type:
UpperCAmelCase_ : Optional[int] = True
break
# Try again after removing the last word in the name
UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] )
UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
UpperCAmelCase_ : List[Any] = list(A )
all_models.sort()
UpperCAmelCase_ : Dict = {'''model_type''': all_models}
UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models]
UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models]
UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure every model type has an associated processor class
UpperCAmelCase_ : int = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
UpperCAmelCase_ : Any = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
UpperCAmelCase_ : int = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
UpperCAmelCase_ : Dict = '''AutoTokenizer'''
UpperCAmelCase_ : str = [processors[t] for t in all_models]
return pd.DataFrame(A )
def __UpperCAmelCase ( A : Optional[int] ) -> str:
UpperCAmelCase_ : int = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(A , A , A ):
# The type of pipeline may not exist in this framework
if not hasattr(A , A ):
continue
# First extract all model_names
UpperCAmelCase_ : List[str] = []
for name in getattr(A , A ).values():
if isinstance(A , A ):
model_names.append(A )
else:
model_names.extend(list(A ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __UpperCAmelCase ( A : int , A : Any ) -> Tuple:
UpperCAmelCase_ : Tuple = get_frameworks_table()
UpperCAmelCase_ : Any = Dataset.from_pandas(A )
UpperCAmelCase_ : str = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A )
UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A )
UpperCAmelCase_ : Optional[int] = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(A ) )
}
UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() )
UpperCAmelCase_ : Optional[Any] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
UpperCAmelCase_ : Dict = Dataset.from_pandas(A )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) )
if commit_sha is not None:
UpperCAmelCase_ : List[str] = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
UpperCAmelCase_ : int = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , )
def __UpperCAmelCase ( ) -> int:
UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
UpperCAmelCase_ : List[str] = []
for key in pipeline_tasks:
if key not in in_table:
UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt''']
if isinstance(A , (list, tuple) ):
UpperCAmelCase_ : Dict = model[0]
UpperCAmelCase_ : Any = model.__name__
if model not in in_table.values():
missing.append(A )
if len(A ) > 0:
UpperCAmelCase_ : List[Any] = ''', '''.join(A )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 304
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__: Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = ReformerTokenizer
__SCREAMING_SNAKE_CASE = ReformerTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
def UpperCamelCase ( self ):
super().setUp()
A__ = ReformerTokenizer(_A,keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
A__ = '''<s>'''
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ),_A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ),_A )
def UpperCamelCase ( self ):
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],'''<unk>''' )
self.assertEqual(vocab_keys[1],'''<s>''' )
self.assertEqual(vocab_keys[-1],'''j''' )
self.assertEqual(len(_A ),1000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size,1000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = '''I was born in 92000, and this is falsé.'''
A__ = tokenizer.tokenize(_A )
A__ = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A,_A )
A__ = tokenizer.encode(_A,add_special_tokens=_A )
A__ = rust_tokenizer.encode(_A,add_special_tokens=_A )
self.assertListEqual(_A,_A )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(_A )
A__ = rust_tokenizer.encode(_A )
self.assertListEqual(_A,_A )
def UpperCamelCase ( self,__lowerCamelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = self.rust_tokenizer_class.from_pretrained(_A,**_A )
# Simple input
A__ = '''This is a simple input'''
A__ = ['''This is a simple input 1''', '''This is a simple input 2''']
A__ = ('''This is a simple input''', '''This is a pair''')
A__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_A,tokenizer_r.encode,_A,max_length=_A,padding='''max_length''' )
# Simple input
self.assertRaises(_A,tokenizer_r.encode_plus,_A,max_length=_A,padding='''max_length''' )
# Simple input
self.assertRaises(
_A,tokenizer_r.batch_encode_plus,_A,max_length=_A,padding='''max_length''',)
# Pair input
self.assertRaises(_A,tokenizer_r.encode,_A,max_length=_A,padding='''max_length''' )
# Pair input
self.assertRaises(_A,tokenizer_r.encode_plus,_A,max_length=_A,padding='''max_length''' )
# Pair input
self.assertRaises(
_A,tokenizer_r.batch_encode_plus,_A,max_length=_A,padding='''max_length''',)
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
A__ = ReformerTokenizer(_A,keep_accents=_A )
A__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ),[285, 46, 10, 170, 382],)
A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
],)
A__ = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],)
A__ = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
],)
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def UpperCamelCase ( self ):
A__ = '''Hello World!'''
A__ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_A,self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase ( self ):
A__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
A__ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_A,self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
A__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
A__ = ''' '''.join(_A )
A__ = self.big_tokenizer.encode_plus(_A,return_tensors='''pt''' )
A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence],return_tensors='''pt''' )
A__ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
A__ = encoded_sequence['''input_ids'''].shape
A__ = ReformerModel(_A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase ( self ):
# fmt: off
A__ = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
A__ = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A,model_name='''google/reformer-crime-and-punishment''',revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''',padding=_A,sequences=_A,)
| 193
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
_UpperCamelCase : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_UpperCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
a_ = field(default=UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."})
a_ = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
a_ = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a_ = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
a_ = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
def __UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ) -> List[Any]:
def _dataset(A : Dict , A : str=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , )
return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size )
else:
return TextDataset(
tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
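
# Illustrative note (not part of the original example script): the evaluation branch
# above reports perplexity as exp(eval_loss). With a mean cross-entropy loss in nats,
# the conversion is a one-liner; the loss value below is hypothetical.
#
#   import math
#   eval_loss = 2.5                   # e.g. trainer.evaluate()["eval_loss"]
#   perplexity = math.exp(eval_loss)  # ~12.18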
| 304
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` so that the lowest-probability classes are removed: only the
        smallest set of classes whose cumulative probability exceeds `truncation_rate` is
        kept; the rest are set to log(0).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
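
# Minimal sketch of what `truncate` does, with toy values (standalone, not part of
# the pipeline above):
#
#   import torch
#   log_p = torch.log(torch.tensor([[[0.6], [0.3], [0.1]]]))  # (batch, classes, pixels)
#   # with truncation_rate=0.8, the cumulative sorted probabilities are 0.6, 0.9, 1.0;
#   # after shifting the keep-mask down by one, the 0.6 and 0.3 classes survive and
#   # the 0.1 class is set to log(0) = -inf.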
| 75
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
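
# Hedged usage sketch for `check_models_equal` (model construction assumed): two Flax
# models initialised from the same config and seed have identical parameters, so the
# flattened-dict comparison returns True.
#
#   config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5,
#                       num_attention_heads=4, intermediate_size=37)
#   assert check_models_equal(FlaxBertModel(config, seed=0), FlaxBertModel(config, seed=0))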
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 304
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)

        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
| 332
|
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 304
| 0
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    """
    MLFQ (Multi Level Feedback Queue): every queue except the last runs round robin
    with its own time slice; the last queue runs first-come-first-served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one run the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
__snake_case : Tuple =Process('P1', 0, 5_3)
__snake_case : Optional[Any] =Process('P2', 0, 1_7)
__snake_case : int =Process('P3', 0, 6_8)
__snake_case : int =Process('P4', 0, 2_4)
__snake_case : List[Any] =3
__snake_case : Any =[1_7, 2_5]
__snake_case : Optional[Any] =deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
__snake_case : Optional[Any] =Process('P1', 0, 5_3)
__snake_case : List[str] =Process('P2', 0, 1_7)
__snake_case : int =Process('P3', 0, 6_8)
__snake_case : Union[str, Any] =Process('P4', 0, 2_4)
__snake_case : Optional[Any] =3
__snake_case : Tuple =[1_7, 2_5]
__snake_case : Tuple =deque([Pa, Pa, Pa, Pa])
__snake_case : Dict =MLFQ(number_of_queues, time_slices, queue, 0)
__snake_case : Optional[Any] =mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
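
    # Worked numbers for the demo above, derivable from the slices [17, 25]:
    # P1 (burst 53) uses 17 in Q0 and 25 in Q1, leaving 11 units for the final FCFS
    # queue; P2 (burst 17) finishes within Q0's slice; P3 (burst 68) carries 26 into
    # FCFS; P4 (burst 24) keeps 24 - 17 = 7 after Q0 and finishes in Q1.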
| 129
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()
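
# Hedged usage sketch for `padding_tensor` (toy inputs, right padding):
#
#   padding_tensor([[1, 2], [3]], padding_value=-1, padding_side="right", sequence_length=4)
#   # -> [[1, 2, -1, -1], [3, -1, -1, -1]]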
def is_punctuation(char: str) -> bool:
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 304
| 0
|
import numpy as np
class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
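
# Hedged usage sketch (toy reflectance arrays; real inputs are whole image bands):
#
#   import numpy as np
#   idx = IndexCalculation(red=np.array([0.1]), nir=np.array([0.5]))
#   idx.calculation("NDVI")  # -> array([0.66666667]), i.e. (0.5 - 0.1) / (0.5 + 0.1)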
| 310
|
'''simple docstring'''
import functools


def edit_distance(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining chars of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining chars of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
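
# Usage sketch (standard Levenshtein results):
#
#   edit_distance("kitten", "sitting")       # -> 3
#   edit_distance("intention", "execution")  # -> 5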
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304
| 0
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
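
# Quick sanity sketch for `betas_for_alpha_bar` (approximate values):
#
#   betas = betas_for_alpha_bar(1000)
#   betas.shape   # torch.Size([1000])
#   betas.max()   # <= 0.999, capped by `max_beta`
#   # the cosine schedule makes early betas tiny and late betas approach the cap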
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
return self.config.num_train_timesteps
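
# Minimal usage sketch, assuming the scheduler above is importable as
# `HeunDiscreteScheduler`:
#
#   sched = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   sched.set_timesteps(10)
#   # every timestep except the first is duplicated, because each Heun update
#   # evaluates the model twice (predictor + corrector):
#   assert len(sched.timesteps) == 2 * 10 - 1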
| 176
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
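
# Background note: the expansions generated above are 3/2, 7/5, 17/12, 41/29, ...;
# the first fraction whose numerator has more digits than its denominator is the
# 8th expansion, 1393/985, so `solution(8)` returns 1.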
if __name__ == "__main__":
print(f'''{solution() = }''')
| 304
| 0
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Target key names follow the upstream diffusers conversion script.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
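# Shape illustration for the qkv split above (demo only; C=8 is arbitrary):
# the original checkpoints store q, k, v stacked along dim 0 as a 1x1 conv,
# so chunk(3, dim=0) plus squeezing the trailing singleton dims yields plain
# linear weight matrices.
_qkv_demo = torch.randn(3 * 8, 8, 1, 1)
_q_demo, _k_demo, _v_demo = _qkv_demo.chunk(3, dim=0)
assert _q_demo.squeeze(-1).squeeze(-1).shape == (8, 8)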
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
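    # Example invocation (script name and paths are hypothetical):
    #   python convert_consistency_to_diffusers.py \
    #       --unet_path /path/to/cd_imagenet64_l2.pt \
    #       --dump_path ./consistency-model-imagenet64 \
    #       --class_cond True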
| 201
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
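# Numeric illustration of do_reduce_labels (demo only, not part of the suite):
# background 0 becomes the ignore index 255 and classes shift to 0..149,
# which is exactly why the assertions above bound labels by 150 vs 255.
_seg_demo = np.array([0, 1, 150], dtype=np.int64)
_reduced_demo = _seg_demo - 1
_reduced_demo[_reduced_demo == -1] = 255
assert _reduced_demo.tolist() == [255, 0, 149]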
| 304
| 0
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
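# Patch-count arithmetic exercised by the interpolate_pos_encoding tests above
# (hand-checkable with the tester defaults image_size=30, patch_size=2):
assert (30 // 2) ** 2 + 1 == 226  # full-size image: 225 patches + [CLS]
assert ((30 // 2) // 2) ** 2 + 1 == 50  # half-size image after interpolation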
| 83
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
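# Typical use of this pipeline (model name is just an example):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)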
| 304
| 0
|
def apply_table(inp, table):
    """Permute/select bits of `inp` according to the 1-indexed `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]
def xor(a, b):
    """Bitwise xor of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
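# Worked micro-checks for the helpers above (values verifiable by hand):
# apply_table picks 1-indexed positions, xor compares bit strings.
assert apply_table("abcd", [2, 4, 3, 1]) == "bdca"
assert xor("1010", "0110") == "1100"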
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 281
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
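# Hand-checkable example: with scores [3, 5, 2, 9] and height log2(4) = 2,
# the maximiser gets max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3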
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
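# Note: _LazyModule defers the torch-gated imports above until an attribute is
# first accessed, so e.g. `from transformers import MegaModel` only pays the
# modeling_mega import cost at that point.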
| 219
|
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
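# Quick check of the bottom-up merging above (widths 2, then 4, then a final
# merge of the leftover tail):
assert iter_merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]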
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 304
| 0
|
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
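# Tiny hand-checkable instance: person 0 can do task 1, person 1 tasks 1 or 2;
# the only complete assignment is {p0: task 1, p1: task 2}, i.e. one way.
assert AssignmentUsingBitmask([[1], [1, 2]], 2).count_no_of_ways([[1], [1, 2]]) == 1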
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 318
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
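# Sanity check for create_pan_cameras (illustration, assumes the
# reconstruction above): every camera origin sits at distance 4 from the
# scene center, since origin = -z * 4 with z normalised.
_demo_cam = create_pan_cameras(64)
assert torch.allclose(_demo_cam.origin.norm(dim=-1), torch.full((20,), 4.0))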
| 304
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset: Dataset , ref_file: str ) -> Dataset:
    with open(ref_file , '''r''' , encoding='''utf-8''' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['''chinese_ref'''] = refs
    return Dataset.from_dict(dataset_dict )
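# Note on the ref-file format (an illustrative sketch, not taken from the original script):
# each line is a JSON list of token indices that *continue* a Chinese word, e.g. for
# "我 喜欢 北京" tokenized as ['我', '喜', '欢', '北', '京'] a line could be "[2, 4]",
# telling DataCollatorForWholeWordMask to mask '喜欢' and '北京' as whole words.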
def UpperCamelCase__( )->Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            datasets['''validation'''] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f"train[:{data_args.validation_split_percentage}%]" , )
            datasets['''train'''] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f"train[{data_args.validation_split_percentage}%:]" , )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['''train'''] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['''validation'''] = data_args.validation_file
        extension = data_args.train_file.split('''.''' )[-1]
        if extension == "txt":
            extension = '''text'''
        datasets = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}" )
            config.update_from_string(model_args.config_overrides )
            logger.info(f"New config: {config}" )
    tokenizer_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''use_fast''': model_args.use_fast_tokenizer,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported by this script. '''
            '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['''train'''].column_names
    else:
        column_names = datasets['''validation'''].column_names
    text_column_name = '''text''' if '''text''' in column_names else column_names[0]
    padding = '''max_length''' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines
        examples['''text'''] = [line for line in examples['''text'''] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['''text'''] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['''train'''] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets['''validation'''] = add_chinese_references(
            tokenized_datasets['''validation'''] , data_args.validation_ref_file )
    # If we have ref files, we need to prevent the Trainer from removing them
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , '''train_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_train_file , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        results['''perplexity'''] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
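# Example launch (illustrative; the script name and file paths are placeholders):
#   python run_mlm_wwm.py --model_name_or_path bert-base-chinese \
#       --train_file train.txt --train_ref_file train_ref.txt \
#       --do_train --output_dir ./mlm_wwm_out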
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str ) -> tuple[list[int], list[int]]:
        '''Encrypt each character of ``text`` with a fresh random key.'''
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 3_00 )
            cipher.append((i + k) * k )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int] , key: list[int] ) -> str:
        '''Invert ``encrypt``: p = (c - k^2) / k.'''
        plain = []
        for i in range(len(key ) ):
            plain.append(chr(int((cipher[i] - key[i] ** 2 ) / key[i] ) ) )
        return "".join(plain )
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
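# Round-trip sanity check (a minimal sketch added for illustration; the key stream is
# random, so we only assert that decryption recovers the plaintext exactly):
if __name__ == "__main__":
    message = "attack at dawn"
    cipher_check, key_check = Onepad.encrypt(message)
    assert Onepad.decrypt(cipher_check, key_check) == message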
'''simple docstring'''
import math
def is_prime(number: int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1 ) -> int:
    """simple docstring"""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
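# Worked check (illustrative): with ratio=0.5 the loop adds the diagonal corners
# 13,17,21, then 31,37,43, then 57,65,73, then 91,101,111, and the prime fraction
# first drops below one half at side length 11, so solution(0.5) == 11.
if __name__ == "__main__":
    assert solution(0.5) == 11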
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self ) -> List[Any]:
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ) -> Any:
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ) -> str:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(vocab_keys ) , 10_00 )
    def test_vocab_size(self ) -> int:
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
    def test_rust_and_python_full_tokenizers(self ) -> List[Any]:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_padding(self , max_length=15 ) -> str:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                sa = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                pa = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding='''max_length''' , )
def A ( self : Union[str, Any] ) -> int:
pass
    def test_full_tokenizer(self ) -> Any:
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2_85, 46, 10, 1_70, 3_82] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self ) -> Optional[int]:
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
    def test_tokenization_base_easy_symbols(self ) -> str:
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols(self ) -> str:
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self ) -> Optional[int]:
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ''' '''.join(tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence['''input_ids'''].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration(self ) -> Optional[Any]:
        # fmt: off
        expected_encoding = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=False , sequences=sequences , )
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig ):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , '''rb''' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table(self , pa_table: pa.Table ):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables(self , files ):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'''{file_idx}_{batch_idx}''', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                    raise
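# Minimal usage sketch (illustrative file paths): this builder backs calls such as
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
# where each parquet file is read in record batches of `config.batch_size` rows.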
'''simple docstring'''
from __future__ import annotations
def encode(plain: str ) -> list[int]:
    return [ord(elem ) - 9_6 for elem in plain]
def decode(encoded: list[int] ) -> str:
    return "".join(chr(elem + 9_6 ) for elem in encoded )
def main() -> None:
    encoded = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , encoded )
    print('''Decoded:''' , decode(encoded ) )
if __name__ == "__main__":
    main()
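# Round-trip sketch (illustrative): encode('abc') == [1, 2, 3] and
# decode(encode('abc')) == 'abc', since ord('a') is 97 and the offset is 96.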
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin , unittest.TestCase):
    '''simple docstring'''
    model_class = PriorTransformer
    main_input_name = '''hidden_states'''
@property
    def dummy_input(self ) -> List[str]:
        """simple docstring"""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self ,seed=0 ) -> str:
        """simple docstring"""
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    @property
    def input_shape(self ) -> Tuple:
        """simple docstring"""
        return (4, 8)
    @property
    def output_shape(self ) -> Tuple:
        """simple docstring"""
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self ) -> Dict:
        """simple docstring"""
        init_dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self ) -> List[Any]:
        """simple docstring"""
        model, loading_info = PriorTransformer.from_pretrained(
            '''hf-internal-testing/prior-dummy''' ,output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self ) -> Tuple:
        """simple docstring"""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['''hidden_states''', '''timestep''']
        self.assertListEqual(arg_names[:2] ,expected_arg_names )
    def test_output_pretrained(self ) -> str:
        """simple docstring"""
        model = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
        model = model.to(torch_device )
        if hasattr(model ,'''set_default_attn_processor''' ):
            model.set_default_attn_processor()
        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
        self.assertTrue(torch_all_close(output_slice ,expected_output_slice ,rtol=1e-2 ) )
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def get_dummy_seed_input(self ,batch_size=1 ,embedding_dim=7_68 ,num_embeddings=77 ,seed=0 ) -> int:
        """simple docstring"""
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown(self ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
    def test_kandinsky_prior(self ,seed ,expected_slice ) -> Tuple:
        """simple docstring"""
        model = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' ,subfolder='''prior''' )
        model.to(torch_device )
        input_dict = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**input_dict )[0]
        assert list(sample.shape ) == [1, 7_68]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice ,expected_output_slice ,atol=1e-3 )
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
from importlib import import_module
from .logging import get_logger
__snake_case = get_logger(__name__)
class _PatchedModuleObj:
    def __init__( self, module, attrs=None ):
        '''simple docstring'''
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__' ):
                    setattr(self, key, getattr(module, key ) )
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj ) else module
class patch_submodule:
    _active_patches = []
    def __init__( self, obj, target: str, new, attrs=None ):
        '''simple docstring'''
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.' )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        '''simple docstring'''
        *submodules, target_attr = self.target.split('.' )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('.'.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs ) )
                    patched = getattr(self.obj, attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None ), attrs=self.attrs ) )
                        patched = getattr(patched, key )
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules ) ), target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr ) is attr_value:
                    self.original[attr] = getattr(self.obj, attr )
                    setattr(self.obj, attr, self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['''__builtins__'''][target_attr]
            setattr(self.obj, target_attr, self.new )
        else:
            raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''' )
    def __exit__( self, *exc_info ):
        '''simple docstring'''
        for attr in list(self.original ):
            setattr(self.obj, attr, self.original.pop(attr ) )
    def start( self ):
        '''simple docstring'''
        self.__enter__()
        self._active_patches.append(self )
    def stop( self ):
        '''simple docstring'''
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
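# Usage sketch (illustrative object and target; os.path.join is a typical patch
# target for this helper, and _FakeModule stands in for a real module object):
if __name__ == "__main__":
    import os as _os
    class _FakeModule:
        os = _os
    with patch_submodule(_FakeModule, "os.path.join", lambda *parts: "/".join(parts)):
        assert _FakeModule.os.path.join("a", "b") == "a/b"  # patched
    assert _FakeModule.os.path.join("a", "b") == _os.path.join("a", "b")  # restored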
'''simple docstring'''
def gray_code(bit_count: int ) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string(bit_count: int ) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append('''0''' + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append('''1''' + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
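# Property check (illustrative sketch): consecutive Gray codes differ in exactly
# one bit, e.g. gray_code(3) == [0, 1, 3, 2, 6, 7, 5, 4].
if __name__ == "__main__":
    codes = gray_code(3)
    assert codes == [0, 1, 3, 2, 6, 7, 5, 4]
    for a, b in zip(codes, codes[1:]):
        assert bin(a ^ b).count("1") == 1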
from __future__ import annotations
def print_distance(distance: list[float] , src: int ) -> None:
    '''simple docstring'''
    print(F'Vertex\tShortest Distance from vertex {src}' )
    for i, d in enumerate(distance ):
        print(F'{i}\t\t{d}' )
def check_negative_cycle(graph: list[dict[str, int]] , distance: list[float] , edge_count: int ) -> bool:
    '''simple docstring'''
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]] , vertex_count: int , edge_count: int , src: int ) -> list[float]:
    '''simple docstring'''
    distance = [float('inf' )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception('Negative cycle found' )
    return distance
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    V = int(input("""Enter number of vertices: """).strip())
    E = int(input("""Enter number of edges: """).strip())
    graph = [{} for _ in range(E)]
    for i in range(E):
        print("""Edge """, i + 1)
        src, dest, weight = (
            int(x)
            for x in input("""Enter source, destination, weight: """).strip().split(""" """)
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}
    source = int(input("""\nEnter shortest path source:""").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
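# Worked example (illustrative): for edges 0->1 (w=2) and 1->2 (w=3) with source 0,
# bellman_ford(graph, 3, 2, 0) returns [0.0, 2.0, 5.0]; adding an edge 2->0 with
# weight -6 closes a cycle of total weight -1 and makes the function raise an Exception.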
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_UpperCamelCase : Any = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"
    def __init__( self : str , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , num_layers_norm_eps=None , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , layer_norm_eps=1e-12 , **kwargs , ) -> Union[str, Any]:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
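# Usage sketch (illustrative values): a small config for movement-pruning experiments.
#   config = MaskedBertConfig(num_hidden_layers=2, pruning_method="topK", mask_init="constant")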
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=10 ) -> int:
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=10 ) -> Dict:
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , '''schedule.bin''' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual( self, list1, list2, tol ) -> Union[str, Any]:
        """simple docstring"""
        self.assertEqual(len(list1 ), len(list2 ) )
        for a, b in zip(list1, list2 ):
            self.assertAlmostEqual(a, b, delta=tol )
    def test_adam_w( self ) -> List[Any]:
        """simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w, target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2 )
    def test_adafactor( self ) -> Optional[int]:
        """simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1E-2, eps=(1E-30, 1E-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1000 ):
            loss = criterion(w, target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2 )
@require_torch
class ScheduleInitTest(unittest.TestCase ):
'''simple docstring'''
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self, list1, list2, tol, msg=None ) -> List[Any]:
        """simple docstring"""
        self.assertEqual(len(list1 ), len(list2 ) )
        for a, b in zip(list1, list2 ):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg )
    def test_schedulers( self ) -> int:
        """simple docstring"""
        common_kwargs = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ), 1 )
            lrs_1 = unwrap_schedule(scheduler, self.num_steps )
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1E-2, msg=f"failed for {scheduler_func} in normal scheduler", )
            scheduler = scheduler_func(self.optimizer, **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps )
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper:
    '''simple docstring'''
    def __init__( self, fn ) -> Optional[Any]:
        """simple docstring"""
        self.fn = fn
    def __call__( self, *args, **kwargs ) -> List[Any]:
        """simple docstring"""
        return self.fn(*args, **kwargs )
    @classmethod
    def wrap_scheduler( cls, scheduler ) -> Union[str, Any]:
        """simple docstring"""
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas ) )
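# Why the wrapper exists (interpretive note): replacing each lr_lambda closure with a
# picklable module-level callable lets the save-and-reload path in
# unwrap_and_save_reload_schedule above exercise torch.save/torch.load on the
# scheduler state ("wrap to test picklability of the schedule").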
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self ) -> Optional[Any]:
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_zero=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self , device , seed=0 ) -> str:
        mask = floats_tensor((1, 16, 16) , rng=random.Random(seed ) ).to(device )
        latents = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''a dog and a newt''',
            '''mask_image''': mask,
            '''image_latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''inpaint_strength''': 1.0,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def get_dummy_mask_inputs(self , device , seed=0 ) -> List[str]:
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': image,
            '''source_prompt''': '''a cat and a frog''',
            '''target_prompt''': '''a dog and a newt''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''num_maps_per_mask''': 2,
            '''mask_encode_strength''': 1.0,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def get_dummy_inversion_inputs(self , device , seed=0 ) -> Any:
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': image,
            '''prompt''': '''a cat and a frog''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''inpaint_strength''': 1.0,
            '''guidance_scale''': 6.0,
            '''decode_latents''': True,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_save_load_optional_components(self ) -> Optional[Any]:
        if not hasattr(self.pipeline_class , '''_optional_components''' ):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , F"`{optional_component}` did not stay set to None after loading." , )
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(output - output_loaded ).max()
        self.assertLess(max_diff , 1e-4 )
    def test_mask(self ) -> int:
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_mask_inputs(device )
        mask = pipe.generate_mask(**inputs )
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        expected_slice = np.array([0] * 9 )
        max_diff = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )
    def test_inversion(self ) -> Optional[int]:
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_inference_batch_single_identical(self ) -> Optional[Any]:
        super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
    def test_inversion_dpm(self ) -> Tuple:
        device = '''cpu'''
        components = self.get_dummy_components()
        scheduler_args = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
        components['''scheduler'''] = DPMSolverMultistepScheduler(**scheduler_args )
        components['''inverse_scheduler'''] = DPMSolverMultistepInverseScheduler(**scheduler_args )
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @classmethod
    def setUpClass(cls ) -> List[Any]:
        raw_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
        raw_image = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self ) -> List[str]:
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = '''a bowl of fruit'''
        target_prompt = '''a bowl of pears'''
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
        expected_image = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self ) -> List[str]:
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = '''a bowl of fruit'''
        target_prompt = '''a bowl of pears'''
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator , num_inference_steps=25 , ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : Tuple = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 304
| 0
|
'''simple docstring'''
from math import isqrt, log2
def calculate_prime_numbers( max_number ):
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
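# e.g. calculate_prime_numbers(10) -> [2, 3, 5, 7]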
def solution( base = 8_0_0_8_0_0 , degree = 8_0_0_8_0_0 ):
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
while left < right:
while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
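# The inner while-loop above enforces q * log2(p) + p * log2(q) <= degree * log2(base), the
# log-domain form of the hybrid-integer condition p**q * q**p <= base**degree; the two-pointer
# sweep then counts every qualifying prime pair (p, q) with p < q.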
if __name__ == "__main__":
print(F"""{solution() = }""")
| 83
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ ( ConfigTester):
    def create_and_test_config_common_properties( self ) -> List[Any]:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''embed_dim''' ) )
        self.parent.assertTrue(hasattr(config , '''num_heads''' ) )
class CvtModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1e-12 , is_training=True , use_labels=True , num_labels=2 , ) -> List[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
    def setUp( self ) -> List[str]:
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
def A ( self : Any ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> List[str]:
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def A ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def A ( self : Any ) -> Optional[Any]:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def A ( self : List[Any] ) -> Any:
pass
def A ( self : int ) -> str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def A ( self : Tuple ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def A ( self : Dict ) -> List[str]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def A ( self : Union[str, Any] ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def A ( self : Optional[int] ) -> int:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> str:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase):
@cached_property
    def default_image_processor( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A ( self : str ) -> str:
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 304
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader( path : str ) -> Optional[Any]:
    '''simple docstring'''
    with open(path , "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
@dataclass
class DataTrainingArguments :
    dataset_name: Optional[str] = field(
        default=None , metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self ):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: Optional[str] = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn( examples : Union[str, Any] ) -> Tuple:
    '''simple docstring'''
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main( ) -> Union[str, Any]:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir , "**" )
        dataset = load_dataset(
            "imagefolder" , data_files=data_files , cache_dir=model_args.cache_dir , task="image-classification" , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset['''train'''].train_test_split(data_args.train_val_split )
        dataset['''train'''] = split['''train''']
        dataset['''validation'''] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['''train'''].features['''labels'''].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p : int ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__magic_name__ : Dict = image_processor.size['''shortest_edge''']
else:
__magic_name__ : Optional[Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
__magic_name__ : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__magic_name__ : str = Compose(
[
RandomResizedCrop(_snake_case ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__magic_name__ : List[str] = Compose(
[
Resize(_snake_case ),
CenterCrop(_snake_case ),
ToTensor(),
normalize,
] )
def train_transforms(_snake_case : Optional[Any] ):
__magic_name__ : str = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(_snake_case : List[Any] ):
__magic_name__ : List[Any] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            dataset['''train'''] = (
                dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            dataset['''validation'''] = (
                dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''image-classification''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''image-classification''', '''vision'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 281
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class snake_case__ ( TaskTemplate):
    task: str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"
@property
def A ( self : List[str] ) -> Dict[str, str]:
return {self.text_column: "text"}
| 304
| 0
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __snake_case ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
    def dummy_uncond_unet( self : Any ):
        """simple docstring"""
        unet = UNet2DModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
        return unet
@property
    def dummy_cond_unet( self : Optional[int] ):
        """simple docstring"""
        unet = UNet2DModel.from_pretrained(
            """diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
        return unet
    def get_dummy_components( self : Dict , class_cond : List[str]=False ):
        """simple docstring"""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs( self : int , device : Dict , seed : List[str]=0 ):
        """simple docstring"""
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''batch_size''': 1,
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        return inputs
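    # NOTE: passing ``num_inference_steps=None`` together with the explicit ``timesteps=[22, 0]`` list
    # built above exercises the pipeline's custom-timestep path, i.e. two-step stochastic consistency
    # sampling rather than a fixed-length schedule.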
def __a ( self : Optional[Any] ):
"""simple docstring"""
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self : List[str] ):
"""simple docstring"""
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''class_labels'''] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self : Optional[Any] ):
"""simple docstring"""
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''num_inference_steps'''] = 1
        inputs['''timesteps'''] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self : str ):
"""simple docstring"""
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''num_inference_steps'''] = 1
        inputs['''timesteps'''] = None
        inputs['''class_labels'''] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    def tearDown( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 , get_fixed_latents=False , device="cpu" , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        """simple docstring"""
        generator = torch.manual_seed(seed )
        inputs = {
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''class_labels''': 0,
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs['''latents'''] = latents
        return inputs
    def get_fixed_latents( self , seed=0 , device="cpu" , dtype=torch.float16 , shape=(1, 3, 64, 64) ):
        """simple docstring"""
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
SCREAMING_SNAKE_CASE__ = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
SCREAMING_SNAKE_CASE__ = ConsistencyModelPipeline(unet=_A , scheduler=_A )
pipe.to(torch_device=_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**_A ).images
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
SCREAMING_SNAKE_CASE__ = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
SCREAMING_SNAKE_CASE__ = ConsistencyModelPipeline(unet=_A , scheduler=_A )
pipe.to(torch_device=_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = pipe(**_A ).images
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    @require_torch_2
def __a ( self : Optional[int] ):
"""simple docstring"""
        unet = UNet2DModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    @require_torch_2
def __a ( self : Union[str, Any] ):
"""simple docstring"""
        unet = UNet2DModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        inputs['''num_inference_steps'''] = 1
        inputs['''timesteps'''] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 219
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict:
with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f:
UpperCAmelCase_ : Union[str, Any] = json.load(A )
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = []
for key, info in class_info.items():
UpperCAmelCase_ : Tuple = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(A ) )
UpperCAmelCase_ : Any = thing_ids
UpperCAmelCase_ : Union[str, Any] = class_names
return metadata
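# The class-info JSON (e.g. ``ade20k_panoptic.json``) is assumed to map class ids to entries of the
# form {"name": ..., "isthing": 0 or 1}; only the names and the ids of "thing" classes are kept here.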
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=2_55 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict( self ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values( self , image_inputs , batched=False ) -> Optional[Any]:
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs( self ) -> str:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp( self ) -> Any:
        self.image_processing_tester = OneFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
        self.assertTrue(hasattr(image_processor , '''ignore_index''' ) )
        self.assertTrue(hasattr(image_processor , '''class_info_file''' ) )
        self.assertTrue(hasattr(image_processor , '''num_text''' ) )
        self.assertTrue(hasattr(image_processor , '''repo_path''' ) )
        self.assertTrue(hasattr(image_processor , '''metadata''' ) )
        self.assertTrue(hasattr(image_processor , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : int , _A : Any=False , _A : List[Any]=False , _A : Any="np" ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
if with_segmentation_maps:
UpperCAmelCase_ : Any = num_labels
if is_instance_map:
UpperCAmelCase_ : Any = list(range(_A ) ) * 2
UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
UpperCAmelCase_ : Dict = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
return inputs
def A ( self : int ) -> str:
pass
def A ( self : Tuple ) -> Union[str, Any]:
def common(_A : Optional[int]=False , _A : str=None ):
UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )
UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
UpperCAmelCase_ : int = inputs['''pixel_values''']
UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_A )
common(is_instance_map=_A , segmentation_type='''pil''' )
common(is_instance_map=_A , segmentation_type='''pil''' )
def A ( self : List[Any] ) -> List[Any]:
        mask = np.zeros((20, 50) )
        mask[0, 20:] = 1
        mask[1, :15] = 1
        mask[5, :10] = 1
        rle = binary_mask_to_rle(mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
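        # ``binary_mask_to_rle`` emits alternating (start, run-length) values over the flattened mask,
        # so the 4-element RLE above encodes exactly two runs of foreground pixels.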
def A ( self : Any ) -> List[Any]:
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A ( self : Optional[Any] ) -> Tuple:
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A ( self : Optional[int] ) -> Union[str, Any]:
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 304
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
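# NOTE: with the lazy-module pattern above, importing this package stays cheap; the sentencepiece-backed
# ``BartphoTokenizer`` is only materialized the first time the attribute is actually accessed.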
| 318
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
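# For example, _re_tf_models maps "TFBertModel" to the capture "Bert" and _re_flax_models does the
# same for "FlaxBertModel"; the plain _re_pt_models pattern then catches "BertModel".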
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split( A : Optional[int] ) -> int:
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A )
    return [m.group(0 ) for m in matches]
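# e.g. camel_case_split("TFBertForMaskedLM") -> ["TF", "Bert", "For", "Masked", "LM"]; the backend
# lookup below strips one trailing word at a time until a known model prefix matches.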
def get_frameworks_table( ):
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''''''.join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {'''model_type''': all_models}
    data['''pytorch'''] = [pt_models[t] for t in all_models]
    data['''tensorflow'''] = [tf_models[t] for t in all_models]
    data['''flax'''] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = '''AutoTokenizer'''
    data['''processor'''] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table( table ):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
        auto_classes = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def __UpperCAmelCase ( A : int , A : Any ) -> Tuple:
UpperCAmelCase_ : Tuple = get_frameworks_table()
UpperCAmelCase_ : Any = Dataset.from_pandas(A )
UpperCAmelCase_ : str = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A )
UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A )
UpperCAmelCase_ : Optional[int] = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(A ) )
}
UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A )
# Sort the model classes to avoid nondeterministic ordering that would create false update commits.
UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() )
UpperCAmelCase_ : Optional[Any] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
UpperCAmelCase_ : Dict = Dataset.from_pandas(A )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) )
if commit_sha is not None:
UpperCAmelCase_ : List[str] = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
UpperCAmelCase_ : int = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , )
def __UpperCAmelCase ( ) -> int:
UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
UpperCAmelCase_ : List[str] = []
for key in pipeline_tasks:
if key not in in_table:
UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt''']
if isinstance(A , (list, tuple) ):
UpperCAmelCase_ : Dict = model[0]
UpperCAmelCase_ : Any = model.__name__
if model not in in_table.values():
missing.append(A )
if len(A ) > 0:
UpperCAmelCase_ : List[Any] = ''', '''.join(A )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
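# Hedged usage sketch for the script above (run from the repo root; the token value is a
# placeholder, not a real credential). --check-only only validates PIPELINE_TAGS_AND_AUTO_MODELS:
#   python utils/update_metadata.py --check-only
#   python utils/update_metadata.py --token hf_XXXX --commit_sha <full-commit-sha>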
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__: Dict = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__: int = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
a__: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
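# Hedged sketch of what the _LazyModule indirection buys at runtime (assumes torch is
# installed, so the modeling symbols were registered in the import structure above):
#   import transformers.models.x_clip as x_clip   # cheap: submodules are not imported yet
#   config = x_clip.XCLIPConfig()                  # attribute access triggers the real import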
| 193
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
_UpperCamelCase : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_UpperCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
a_ = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class snake_case__ :
a_ = field(
default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
a_ = field(
default=UpperCamelCase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
a_ = field(default=UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."})
a_ = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
a_ = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a_ = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
a_ = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a_ = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
def __UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ) -> List[Any]:
def _dataset(A : Dict , A : str=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , )
return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size )
else:
return TextDataset(
tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
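# Hedged usage sketch (argument names mirror the call sites further down in main()):
#   train_dataset = get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir)
#   eval_dataset = get_dataset(data_args, tokenizer=tokenizer, evaluate=True)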
def __UpperCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCAmelCase_ : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
UpperCAmelCase_ : int = AutoModelWithLMHead.from_config(A )
model.resize_token_embeddings(len(A ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the '''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
UpperCAmelCase_ : List[str] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCAmelCase_ : Dict = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCAmelCase_ : str = (
get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCAmelCase_ : Any = (
get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCAmelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask(
tokenizer=A , mlm_probability=data_args.mlm_probability )
else:
UpperCAmelCase_ : List[str] = DataCollatorForLanguageModeling(
tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase_ : Any = Trainer(
model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , )
# Training
if training_args.do_train:
UpperCAmelCase_ : List[str] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=A )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase_ : Tuple = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ : Dict = trainer.evaluate()
UpperCAmelCase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
UpperCAmelCase_ : Optional[int] = {'''perplexity''': perplexity}
UpperCAmelCase_ : int = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(A , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , A , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(A )
return results
def __UpperCAmelCase ( A : Tuple ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 304
| 0
|
'''simple docstring'''
a_ : Tuple = [
(10_00, 'M'),
(9_00, 'CM'),
(5_00, 'D'),
(4_00, 'CD'),
(1_00, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def a_ ( __snake_case : str ) -> int:
"""simple docstring"""
lowerCamelCase_ ={'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
lowerCamelCase_ =0
lowerCamelCase_ =0
while place < len(__snake_case ):
if (place + 1 < len(__snake_case )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def a_ ( __snake_case : int ) -> str:
"""simple docstring"""
lowerCamelCase_ =[]
for arabic, roman in ROMAN:
(lowerCamelCase_ , lowerCamelCase_) =divmod(__snake_case , __snake_case )
result.append(roman * factor )
if number == 0:
break
return "".join(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
@classmethod
def A ( cls : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(_A )
@classmethod
def A ( cls : int ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params )
UpperCAmelCase_ : str = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
UpperCAmelCase_ : int = False
return models_are_equal
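# Hedged usage sketch for the helper above (flat parameter dicts compared with a 1e-4
# tolerance; `check_models_equal` is the name the subfolder tests below call it by):
#   assert check_models_equal(model, FlaxBertModel.from_pretrained(tmp_dir, subfolder="bert"))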
@require_flax
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(_A )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) )
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
UpperCAmelCase_ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : int = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
| 304
| 0
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_lowercase : Optional[int] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : List[str] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_lowercase : Tuple = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_lowercase : str = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_lowercase : Optional[int] = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_lowercase : List[str] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def lowercase__ ( snake_case_ :Optional[int] ):
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , snake_case_ )
return [m.group(0 ) for m in matches]
def lowercase__ ( ):
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(snake_case_ )
__UpperCAmelCase = collections.defaultdict(snake_case_ )
__UpperCAmelCase = collections.defaultdict(snake_case_ )
# Let's look through all transformers objects (once) and find whether models are supported by a given backend.
for attr_name in dir(snake_case_ ):
__UpperCAmelCase = None
if _re_tf_models.match(snake_case_ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(snake_case_ ).groups()[0]
elif _re_flax_models.match(snake_case_ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(snake_case_ ).groups()[0]
elif _re_pt_models.match(snake_case_ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(snake_case_ ).groups()[0]
if lookup_dict is not None:
while len(snake_case_ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(snake_case_ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(snake_case_ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure each model type is matched with a processing class.
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(snake_case_ )
def lowercase__ ( snake_case_ :Optional[int] ):
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
__UpperCAmelCase = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
# Loop through all three frameworks
for module, cls, mapping in zip(snake_case_ , snake_case_ , snake_case_ ):
# The type of pipeline may not exist in this framework
if not hasattr(snake_case_ , snake_case_ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(snake_case_ , snake_case_ ).values():
if isinstance(snake_case_ , snake_case_ ):
model_names.append(snake_case_ )
else:
model_names.extend(list(snake_case_ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowercase__ ( snake_case_ :int , snake_case_ :Any ):
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(snake_case_ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=snake_case_ )
__UpperCAmelCase = Dataset.from_json(snake_case_ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(snake_case_ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(snake_case_ )
# Sort the model classes to avoid nondeterministic ordering that would create false update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(snake_case_ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(snake_case_ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
F'''Update with commit {commit_sha}\n\nSee: '''
F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=snake_case_ , repo_type='''dataset''' , token=snake_case_ , commit_message=snake_case_ , )
def lowercase__ ( ):
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(snake_case_ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(snake_case_ )
if len(snake_case_ ) > 0:
__UpperCAmelCase = ''', '''.join(snake_case_ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_lowercase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 332
|
'''simple docstring'''
_UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_UpperCamelCase : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
_UpperCamelCase : Dict = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 304
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : Dict =logging.get_logger(__name__)
__snake_case : int ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : Union[str, Any] ={
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
__snake_case : str ={
'RUCAIBox/mvp': 1_0_2_4,
}
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =VOCAB_FILES_NAMES
snake_case_ =PRETRAINED_VOCAB_FILES_MAP
snake_case_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ =["""input_ids""", """attention_mask"""]
snake_case_ =MvpTokenizer
def __init__(self ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase="replace" ,__lowerCamelCase="<s>" ,__lowerCamelCase="</s>" ,__lowerCamelCase="</s>" ,__lowerCamelCase="<s>" ,__lowerCamelCase="<unk>" ,__lowerCamelCase="<pad>" ,__lowerCamelCase="<mask>" ,__lowerCamelCase=False ,__lowerCamelCase=True ,**__lowerCamelCase ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
_A ,_A ,tokenizer_file=_A ,errors=_A ,bos_token=_A ,eos_token=_A ,sep_token=_A ,cls_token=_A ,unk_token=_A ,pad_token=_A ,mask_token=_A ,add_prefix_space=_A ,trim_offsets=_A ,**_A ,)
lowerCAmelCase__ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' ,_A ) != add_prefix_space:
lowerCAmelCase__ : Optional[Any] = getattr(_A ,pre_tok_state.pop('''type''' ) )
lowerCAmelCase__ : Optional[Any] = add_prefix_space
lowerCAmelCase__ : Any = pre_tok_class(**_A )
lowerCAmelCase__ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCAmelCase__ : List[Any] = '''post_processor'''
lowerCAmelCase__ : str = getattr(self.backend_tokenizer ,_A ,_A )
if tokenizer_component_instance:
lowerCAmelCase__ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase__ : str = tuple(state['''sep'''] )
if "cls" in state:
lowerCAmelCase__ : List[str] = tuple(state['''cls'''] )
lowerCAmelCase__ : List[str] = False
if state.get('''add_prefix_space''' ,_A ) != add_prefix_space:
lowerCAmelCase__ : Optional[int] = add_prefix_space
lowerCAmelCase__ : int = True
if state.get('''trim_offsets''' ,_A ) != trim_offsets:
lowerCAmelCase__ : str = trim_offsets
lowerCAmelCase__ : List[str] = True
if changes_to_apply:
lowerCAmelCase__ : Any = getattr(_A ,state.pop('''type''' ) )
lowerCAmelCase__ : str = component_class(**_A )
setattr(self.backend_tokenizer ,_A ,_A )
@property
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else value
lowerCAmelCase__ : str = value
def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> BatchEncoding:
"""simple docstring"""
lowerCAmelCase__ : Dict = kwargs.get('''is_split_into_words''' ,_A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A ,**_A )
def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> BatchEncoding:
"""simple docstring"""
lowerCAmelCase__ : Dict = kwargs.get('''is_split_into_words''' ,_A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A ,**_A )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
lowerCAmelCase__ : Dict = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=None ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
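# Hedged sketch of the special-token layout the two methods above produce (BART/MVP convention):
#   single sequence:  <s> A </s>
#   sequence pair:    <s> A </s> </s> B </s>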
| 129
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __UpperCAmelCase ( A : List[str] , A : Any , A : Optional[int] , A : Optional[int] ) -> Optional[Any]:
if isinstance(A , A ):
UpperCAmelCase_ : Any = np.full((len(A ), sequence_length, 2) , A )
else:
UpperCAmelCase_ : int = np.full((len(A ), sequence_length) , A )
for i, tensor in enumerate(A ):
if padding_side == "right":
if isinstance(A , A ):
UpperCAmelCase_ : Tuple = tensor[:sequence_length]
else:
UpperCAmelCase_ : Dict = tensor[:sequence_length]
else:
if isinstance(A , A ):
UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase_ : int = tensor[:sequence_length]
return out_tensor.tolist()
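# Hedged example of the intended right-padding behavior (argument order as used at the
# call sites below: padding_tensor(tensors, padding_value, padding_side, sequence_length)):
#   padding_tensor([[1, 2], [3]], -1, "right", 4) -> [[1, 2, -1, -1], [3, -1, -1, -1]]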
def __UpperCAmelCase ( A : List[Any] ) -> str:
UpperCAmelCase_ : Dict = ord(A )
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class snake_case__ ( UpperCamelCase):
a_ = 42
a_ = True
a_ = None
a_ = None
a_ = -100
a_ = "pt"
def A ( self : List[Any] , _A : Dict ) -> Tuple:
import torch
UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels'''
UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase_ : Tuple = self.tokenizer.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1]
UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase_ : Optional[Any] = [
list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels
]
else:
UpperCAmelCase_ : Any = [
[self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels
]
UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features]
UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A )
UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features]
UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A )
UpperCAmelCase_ : Union[str, Any] = {k: torch.tensor(_A , dtype=torch.intaa ) for k, v in batch.items()}
return batch
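# Hedged usage sketch (the class name DataCollatorForLukeTokenClassification is an
# assumption borrowed from the LUKE NER example; here the collator is defined above):
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator(features)  # features: dicts with labels, ner_tags, original_entity_spans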
| 304
| 0
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowerCamelCase (_a ):
def __init__( self: Optional[int],*A_: Tuple,A_: Optional[Any]=None,A_: Union[str, Any]=None,**A_: Dict ):
'''simple docstring'''
super().__init__(*_A,**_A )
__UpperCamelCase = eval_examples
__UpperCamelCase = post_process_function
def snake_case_ ( self: Optional[Any],A_: str=None,A_: Optional[Any]=None,A_: List[str]=None,A_: str = "eval" ):
'''simple docstring'''
__UpperCamelCase = self.eval_dataset if eval_dataset is None else eval_dataset
__UpperCamelCase = self.get_eval_dataloader(_A )
__UpperCamelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCamelCase = self.compute_metrics
__UpperCamelCase = None
__UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__UpperCamelCase = time.time()
try:
__UpperCamelCase = eval_loop(
_A,description='Evaluation',prediction_loss_only=True if compute_metrics is None else None,ignore_keys=_A,metric_key_prefix=_A,)
finally:
__UpperCamelCase = compute_metrics
__UpperCamelCase = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_A,_A,num_samples=output.num_samples,num_steps=math.ceil(output.num_samples / total_batch_size ),) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__UpperCamelCase = self.post_process_function(_A,_A,output.predictions )
__UpperCamelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__UpperCamelCase = metrics.pop(_A )
metrics.update(output.metrics )
else:
__UpperCamelCase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__UpperCamelCase = self.callback_handler.on_evaluate(self.args,self.state,self.control,_A )
return metrics
def snake_case_ ( self: List[str],A_: Optional[Any],A_: List[str],A_: Tuple=None,A_: str = "test" ):
'''simple docstring'''
__UpperCamelCase = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCamelCase = self.compute_metrics
__UpperCamelCase = None
__UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__UpperCamelCase = time.time()
try:
__UpperCamelCase = eval_loop(
_A,description='Prediction',prediction_loss_only=True if compute_metrics is None else None,ignore_keys=_A,metric_key_prefix=_A,)
finally:
__UpperCamelCase = compute_metrics
__UpperCamelCase = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_A,_A,num_samples=output.num_samples,num_steps=math.ceil(output.num_samples / total_batch_size ),) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__UpperCamelCase = self.post_process_function(_A,_A,output.predictions,'predict' )
__UpperCamelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__UpperCamelCase = metrics.pop(_A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions,label_ids=predictions.label_ids,metrics=_A )
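# Hedged usage sketch (the subclass above matches the examples' QuestionAnsweringTrainer;
# that name and the two callables are assumptions, not confirmed by this file):
#   trainer = QuestionAnsweringTrainer(model=model, args=training_args,
#       eval_examples=eval_examples, post_process_function=post_processing_function,
#       compute_metrics=compute_metrics)
#   metrics = trainer.evaluate()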
| 310
|
'''simple docstring'''
import functools
def __UpperCAmelCase ( A : str , A : str ) -> int:
UpperCAmelCase_ : Optional[Any] = len(A )
UpperCAmelCase_ : List[str] = len(A )
@functools.cache
def min_distance(A : int , A : int ) -> int:
# if the first word's index is out of range, delete all remaining characters of the second word
if indexa >= len_worda:
return len_worda - indexa
# if the second word's index is out of range, delete all remaining characters of the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase_ : Any = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , A ) , 1 + min_distance(A , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
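# Worked example for the Levenshtein distance above (insert/delete/substitute each cost 1):
#   "kitten" -> "sitting" takes 3 edits: substitute k->s, substitute e->i, append g.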
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = 42
A__ = 42
class A ( nn.Module ):
'''simple docstring'''
A__ = 42
A__ = (16, 32, 96, 2_56)
A__ = jnp.floataa
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
lowercase__ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase__ = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase__ = self.block_out_channels[i]
lowercase__ = self.block_out_channels[i + 1]
lowercase__ = nn.Conv(
_UpperCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_UpperCAmelCase )
lowercase__ = nn.Conv(
_UpperCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_UpperCAmelCase )
lowercase__ = blocks
lowercase__ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__(self : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.conv_in(_UpperCAmelCase )
lowercase__ = nn.silu(_UpperCAmelCase )
for block in self.blocks:
lowercase__ = block(_UpperCAmelCase )
lowercase__ = nn.silu(_UpperCAmelCase )
lowercase__ = self.conv_out(_UpperCAmelCase )
return embedding
@flax_register_to_config
class A ( nn.Module , UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
A__ = 32
A__ = 4
A__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
A__ = False
A__ = (3_20, 6_40, 12_80, 12_80)
A__ = 2
A__ = 8
A__ = None
A__ = 12_80
A__ = 0.0
A__ = False
A__ = jnp.floataa
A__ = True
A__ = 0
A__ = "rgb"
A__ = (16, 32, 96, 2_56)
def lowerCamelCase__ (self : Any , _UpperCAmelCase : jax.random.KeyArray ) -> FrozenDict:
"""simple docstring"""
lowercase__ = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase__ = jnp.zeros(_UpperCAmelCase , dtype=jnp.floataa )
lowercase__ = jnp.ones((1,) , dtype=jnp.intaa )
lowercase__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase__ = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase__ = jnp.zeros(_UpperCAmelCase , dtype=jnp.floataa )
lowercase__ , lowercase__ = jax.random.split(_UpperCAmelCase )
lowercase__ = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )["params"]
def lowerCamelCase__ (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.block_out_channels
lowercase__ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase__ = self.num_attention_heads or self.attention_head_dim
# input
lowercase__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase__ = FlaxTimestepEmbedding(_UpperCAmelCase , dtype=self.dtype )
lowercase__ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase__ = self.only_cross_attention
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase__ = []
lowercase__ = []
lowercase__ = block_out_channels[0]
lowercase__ = nn.Conv(
_UpperCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_UpperCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase__ = output_channel
lowercase__ = block_out_channels[i]
lowercase__ = i == len(_UpperCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase__ = FlaxCrossAttnDownBlockaD(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase__ = FlaxDownBlockaD(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_UpperCAmelCase )
for _ in range(self.layers_per_block ):
lowercase__ = nn.Conv(
_UpperCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_UpperCAmelCase )
if not is_final_block:
lowercase__ = nn.Conv(
_UpperCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_UpperCAmelCase )
lowercase__ = down_blocks
lowercase__ = controlnet_down_blocks
# mid
lowercase__ = block_out_channels[-1]
lowercase__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=_UpperCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase__ = nn.Conv(
_UpperCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__(self : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
lowercase__ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase__ = jnp.flip(_UpperCAmelCase , axis=1 )
# 1. time
if not isinstance(_UpperCAmelCase , jnp.ndarray ):
lowercase__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase__ = timesteps.astype(dtype=jnp.floataa )
lowercase__ = jnp.expand_dims(_UpperCAmelCase , 0 )
lowercase__ = self.time_proj(_UpperCAmelCase )
lowercase__ = self.time_embedding(_UpperCAmelCase )
# 2. pre-process
lowercase__ = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1) )
lowercase__ = self.conv_in(_UpperCAmelCase )
lowercase__ = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1) )
lowercase__ = self.controlnet_cond_embedding(_UpperCAmelCase )
sample += controlnet_cond
# 3. down
lowercase__ = (sample,)
for down_block in self.down_blocks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ , lowercase__ = down_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train )
else:
lowercase__ , lowercase__ = down_block(_UpperCAmelCase , _UpperCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase__ = self.mid_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train )
# 5. controlnet blocks
lowercase__ = ()
for down_block_res_sample, controlnet_block in zip(_UpperCAmelCase , self.controlnet_down_blocks ):
lowercase__ = controlnet_block(_UpperCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase__ = controlnet_down_block_res_samples
lowercase__ = self.controlnet_mid_block(_UpperCAmelCase )
# 6. scaling
lowercase__ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_UpperCAmelCase , mid_block_res_sample=_UpperCAmelCase )
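# Hedged shape sketch for the forward pass above, following init_weights (sample_size=32,
# batch=1): `sample` is a (1, 4, 32, 32) latent while `controlnet_cond` is a (1, 3, 256, 256)
# full-resolution conditioning image, i.e. 8x the latent size, as encoded by `sample_size * 8`.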
| 305
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=99 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : List[str]=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=4 , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
def lowerCamelCase__ (self : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase__ (self : Tuple ) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = True
A__ = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ = FlaxBertModelTester(self )
@slow
def lowerCamelCase__ (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = FlaxBertModel.from_pretrained("""bert-base-cased""" )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 305
| 1
|