code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
snake_case_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
snake_case_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = """whisper"""
__UpperCamelCase = ["""past_key_values"""]
__UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Optional[Any] , lowercase_ :Dict=5_18_65 , lowercase_ :Tuple=80 , lowercase_ :str=6 , lowercase_ :Optional[Any]=4 , lowercase_ :Union[str, Any]=6 , lowercase_ :Any=4 , lowercase_ :Optional[int]=15_36 , lowercase_ :Optional[Any]=15_36 , lowercase_ :Tuple=0.0 , lowercase_ :List[Any]=0.0 , lowercase_ :List[Any]=5_02_57 , lowercase_ :str=True , lowercase_ :Optional[int]=True , lowercase_ :Tuple="gelu" , lowercase_ :Union[str, Any]=2_56 , lowercase_ :Optional[int]=0.0 , lowercase_ :List[Any]=0.0 , lowercase_ :List[str]=0.0 , lowercase_ :Dict=0.02 , lowercase_ :Any=False , lowercase_ :str=15_00 , lowercase_ :Union[str, Any]=4_48 , lowercase_ :List[str]=5_02_56 , lowercase_ :Any=5_02_56 , lowercase_ :Optional[int]=5_02_56 , lowercase_ :List[Any]=None , lowercase_ :Dict=[2_20, 5_02_56] , lowercase_ :Optional[int]=False , lowercase_ :Optional[int]=2_56 , lowercase_ :Tuple=False , lowercase_ :Any=0.05 , lowercase_ :Tuple=10 , lowercase_ :Dict=2 , lowercase_ :Union[str, Any]=0.0 , lowercase_ :List[str]=10 , lowercase_ :str=0 , lowercase_ :Tuple=7 , **lowercase_ :Optional[int] , ) -> Optional[Any]:
UpperCAmelCase = vocab_size
UpperCAmelCase = num_mel_bins
UpperCAmelCase = d_model
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = max_source_positions
UpperCAmelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase = classifier_proj_size
UpperCAmelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = apply_spec_augment
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
UpperCAmelCase = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self :Any ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase = {0: 'batch'}
else:
UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase__ ( self :int , lowercase_ :Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ :int = -1 , lowercase_ :int = -1 , lowercase_ :bool = False , lowercase_ :Optional["TensorType"] = None , lowercase_ :int = 2_20_50 , lowercase_ :float = 5.0 , lowercase_ :int = 2_20 , ) -> Mapping[str, Any]:
UpperCAmelCase = OrderedDict()
UpperCAmelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase = encoder_inputs['input_features'].shape[2]
UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = encoder_inputs.pop('input_features' )
UpperCAmelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase__ ( self :Dict ) -> float:
return 1E-3
| 78 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=8 ):
UpperCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _lowerCAmelCase ( lowercase_ , lowercase_=512 , lowercase_=512 ):
UpperCAmelCase = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase = arr.astype(np.floataa ) / 1_2_7.5 - 1
UpperCAmelCase = np.transpose(lowercase_ , [2, 0, 1] )
UpperCAmelCase = torch.from_numpy(lowercase_ ).unsqueeze(0 )
return image
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]:
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]:
# get the original timestep using init_timestep
UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ )
UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any:
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ )
UpperCAmelCase = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase = image
else:
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
]
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
else:
UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
UpperCAmelCase = self.movq.config.scaling_factor * init_latents
UpperCAmelCase = torch.cat([init_latents] , dim=0 )
UpperCAmelCase = init_latents.shape
UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = init_latents
return latents
def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]:
UpperCAmelCase = self._execution_device
UpperCAmelCase = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase = image_embeds.shape[0]
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
if not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [image]
if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ )
UpperCAmelCase = self.movq.encode(lowercase_ )['latents']
UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
UpperCAmelCase = self.prepare_latents(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase = {'image_embeds': image_embeds}
UpperCAmelCase = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase = image * 0.5 + 0.5
UpperCAmelCase = image.clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 78 | 1 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
snake_case_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ = 16000 ):
UpperCAmelCase = int(round(sample_rate * max_length ) )
if len(lowercase_ ) <= sample_length:
return wav
UpperCAmelCase = randint(0 , len(lowercase_ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class A_ :
"""simple docstring"""
__UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Name of a dataset from the datasets package"""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A file containing the training audio paths and labels."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
__UpperCamelCase = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__UpperCamelCase = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
__UpperCamelCase = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
__UpperCamelCase = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
__UpperCamelCase = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class A_ :
"""simple docstring"""
__UpperCamelCase = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
__UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Name or path of preprocessor config."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def UpperCAmelCase__ ( self :Dict ) -> Union[str, Any]:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
'will be removed in a future version. Use `--freeze_feature_encoder`'
'instead. Setting `freeze_feature_encoder==True`.' , lowercase_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase = feature_extractor.model_input_names[0]
def train_transforms(lowercase_ ):
UpperCAmelCase = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
UpperCAmelCase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(lowercase_ )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase_ ):
UpperCAmelCase = [audio['array'] for audio in batch[data_args.audio_column_name]]
UpperCAmelCase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(lowercase_ )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase = raw_datasets['train'].features[data_args.label_column_name].names
UpperCAmelCase , UpperCAmelCase = {}, {}
for i, label in enumerate(lowercase_ ):
UpperCAmelCase = str(lowercase_ )
UpperCAmelCase = label
# Load the accuracy metric from the datasets package
UpperCAmelCase = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ ):
UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
UpperCAmelCase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase = trainer.evaluate()
trainer.log_metrics('eval' , lowercase_ )
trainer.save_metrics('eval' , lowercase_ )
# Write model card and (optionally) push to hub
UpperCAmelCase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
if __name__ == "__main__":
main()
| 78 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = x
UpperCAmelCase = y
for step in range(lowercase_ ): # noqa: B007
UpperCAmelCase = a * a - b * b + x
UpperCAmelCase = 2 * a * b + y
UpperCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) )
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a new PIL RGB image.

    The visible window in the complex plane is centered on
    ``(figure_center_x, figure_center_y)`` and is ``figure_width`` wide; its
    height is derived from the image aspect ratio.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # figure height follows from the width so the plane keeps the image's
    # aspect ratio; it does not depend on the pixel, so compute it once
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 78 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
snake_case_ = """\
"""
snake_case_ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
snake_case_ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    """Perplexity metric: exponentiated mean negative log-likelihood of each
    input text under a causal language model loaded from the Hub."""

    def _info(self):
        """Describe the metric's inputs and metadata for the `datasets` library."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Compute per-text and mean perplexity for ``input_texts`` using ``model_id``.

        Returns a dict with keys ``"perplexities"`` (list of floats, one per
        text) and ``"mean_perplexity"``.
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                # prepend BOS so the first real token's probability is scored too
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # shift so position t predicts token t+1; mask out padding positions
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss is a natural-log NLL, so perplexity = exp(mean NLL);
            # this matches the example values in the module docstring (78.22, 11.11).
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 78 |
"""simple docstring"""
import requests
# These names are referenced as APPID / URL_BASE by the request helpers below.
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Fetch current weather for location ``q`` from the OpenWeatherMap API.

    The API expects query parameters named ``q`` and ``appid``, passed
    explicitly here (clearer than the original ``locals()`` trick).
    """
    return requests.get(URL_BASE + "weather", params={"q": q, "appid": appid}).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Fetch the 5-day forecast for location ``q`` from the OpenWeatherMap API."""
    return requests.get(URL_BASE + "forecast", params={"q": q, "appid": appid}).json()
def ocean_weather(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Fetch the OpenWeatherMap One Call data for coordinates ``(lat, lon)``."""
    return requests.get(URL_BASE + "onecall", params={"lat": lat, "lon": lon, "appid": appid}).json()
if __name__ == "__main__":
    from pprint import pprint

    # Interactive loop: pretty-print the current weather until an empty
    # location is entered.
    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
| 78 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable pinhole camera.

    ``origin``/``x``/``y``/``z`` are ``[batch_size, 3]`` tensors: the camera
    position and its right/up/forward axes. ``shape`` is the logical batch
    shape (its product times ``height * width`` gives the ray count).
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        # sanity-check that all frame tensors are [batch_size, 3]
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        """Return ``[width, height]`` as a float32 tensor."""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        """Return ``[x_fov, y_fov]`` as a float32 tensor."""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Return integer pixel coordinates of shape ``(height * width, 2)`` in
        row-major order (x varies fastest)."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        """All rays of the batch, shaped ``(batch, inner * H * W, 2, 3)`` where
        the last-but-one axis holds (origin, direction)."""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        """Turn pixel coordinates ``(batch, ..., 2)`` into world-space rays
        ``(batch, ..., 2, 3)`` of (origin, unit direction) pairs."""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coords to [-1, 1], then scale by tan(fov / 2)
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a camera for a resized view with the same aspect ratio."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            # `shape` is a required field; omitting it (as the original did)
            # would raise TypeError on construction.
            shape=self.shape,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """Build a 20-view camera rig orbiting the origin (square images of `size`)."""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        # forward axis points at the origin from a ring slightly above it
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 78 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """
    Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single
    processor: the image processor resizes document images (optionally running OCR
    to obtain words and normalized boxes), and the tokenizer turns those into
    ``input_ids``, ``bbox``, ``attention_mask`` etc.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # accept the deprecated `feature_extractor` kwarg as an alias
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare images (and optional text/boxes/labels) for the model."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# Every character an English plaintext may contain (used to reject bad keys),
# the lowercase key-byte candidates, and the most frequent English words used
# to rank the surviving decryptions.
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decrypt ``ciphertext`` with the repeating ``key``.

    Returns the decoded string, or ``None`` as soon as any decoded character
    falls outside the set of valid plaintext characters.
    """
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Brute-force every 3-letter lowercase key and keep the decryptions whose
    characters are all valid plaintext."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate plaintexts containing ``common_word`` (case-insensitive)."""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: decrypt the XOR cipher and return the sum of the
    ASCII values of the decrypted text.

    Candidate decryptions are narrowed down by requiring common English words
    until a single plaintext remains.
    """
    # the cipher file lives next to this script
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
# Script entry point: print the Project Euler answer (the `=` in the f-string
# echoes the expression alongside its value).
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 78 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Directed, weighted graph stored as an adjacency list.

    ``self.graph`` maps each node ``u`` to a list of ``[w, v]`` pairs, one per
    outgoing edge ``u -> v`` with weight ``w``.
    """

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the directed edge ``u -> v`` with weight ``w`` (duplicates ignored)."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the target node also appears in the adjacency list
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every node of the graph."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge ``u -> v`` if it exists."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (default: first node).

        Returns the visit order; stops early and returns as soon as the
        destination ``d`` is reached.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` nodes and random edges (random count if -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s`` (default: first node); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing into ``u``."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological order starting from ``s`` (default: first node)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return nodes that participate in a cycle (iterative DFS with back-edge
        detection via the ``indirect_parents`` trail)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back edge found: everything between the stack top and
                        # node[1] is on a cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if a cycle is detected, else False.

        NOTE(review): the inner ``while`` returns True whenever the stack top is
        not the revisited node (and never decrements its index) — preserved
        verbatim from the original; verify intent before relying on it.
        """
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    """Undirected, weighted graph stored as an adjacency list.

    Every edge is stored in both directions: ``self.graph[u]`` holds
    ``[w, v]`` and ``self.graph[v]`` holds ``[w, u]``.
    """

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge ``u -- v`` with weight ``w`` (duplicates ignored)."""
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge ``u -- v`` in both directions if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (default: first node).

        Returns the visit order; stops early and returns as soon as the
        destination ``d`` is reached.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` nodes and random edges (random count if -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s`` (default: first node); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return nodes that participate in a cycle (iterative DFS with back-edge
        detection via the ``indirect_parents`` trail)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back edge found: everything between the stack top and
                        # node[1] is on a cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if a cycle is detected, else False.

        NOTE(review): the inner ``while`` returns True whenever the stack top is
        not the revisited node (and never decrements its index) — preserved
        verbatim from the original; verify intent before relying on it.
        """
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every node of the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 78 | 1 |
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the first ``accuracy`` Maclaurin terms.

    ``theta`` is first reduced modulo 2*pi so the series converges quickly.
    Raises ValueError on a non-numeric ``theta`` or a non-positive-int
    ``accuracy``.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # range reduction: bring theta into one period of the sine
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def _lowerCAmelCase ( lowercase_ , lowercase_ = 30 ):
if not isinstance(lowercase_ , (int, float) ):
raise ValueError('maclaurin_cos() requires either an int or float for theta' )
if not isinstance(lowercase_ , lowercase_ ) or accuracy <= 0:
raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
UpperCAmelCase = float(lowercase_ )
UpperCAmelCase = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(lowercase_ ) )
# Demo / smoke-test driver for the Maclaurin series approximations.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): both series functions above are defined under the same
    # name ``_lowerCAmelCase`` (the second definition shadows the first), so
    # ``maclaurin_sin`` / ``maclaurin_cos`` are undefined in this module and
    # every call below raises NameError -- restore the intended public names.
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 78 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 78 | 1 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=1e-12 ):
UpperCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(lowercase_ , axis=1 ) , a_min=lowercase_ ) ).T
UpperCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(lowercase_ , axis=1 ) , a_min=lowercase_ ) ).T
return jnp.matmul(lowercase_ , norm_emb_a.T )
class A_ ( nn.Module ):
    """Flax module that scores CLIP image embeddings against fixed "concept"
    embeddings to flag NSFW content (the Stable Diffusion safety-checker core).

    NOTE(review): the two class fields below were both renamed to
    ``__UpperCamelCase`` (the second clobbers the first), yet the methods read
    ``self.config`` and ``self.dtype`` -- presumably ``config: CLIPConfig``
    and ``dtype: jnp.dtype`` originally; ``jnp.floataa`` is not a real jax
    attribute (presumably ``jnp.float32``).  Confirm against upstream.
    """

    __UpperCamelCase = 42
    __UpperCamelCase = jnp.floataa

    def UpperCAmelCase__ ( self :List[Any] ) -> str:
        # Flax ``setup``: builds the CLIP vision tower, the projection layer,
        # and the fixed concept tables (17 NSFW + 3 "special care" concepts,
        # each with a per-concept threshold weight).
        # NOTE(review): every assignment target was collapsed to
        # ``UpperCAmelCase`` although ``__call__`` reads ``self.vision_model``
        # / ``self.visual_projection`` -- the attribute names must be
        # restored; ``lowercase_`` below is also undefined in this scope.
        UpperCAmelCase = FlaxCLIPVisionModule(self.config.vision_config )
        UpperCAmelCase = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype )
        UpperCAmelCase = self.param('concept_embeds' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        UpperCAmelCase = self.param(
            'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        UpperCAmelCase = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (17,) )
        UpperCAmelCase = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )

    def __call__( self :List[Any] , lowercase_ :str ) -> int:
        # Embed the image through CLIP, project it, then compare against the
        # concept tables via cosine similarity; an image is flagged when any
        # concept score exceeds its threshold.
        # NOTE(review): local bindings were obfuscated away while later lines
        # read ``special_cos_dist`` / ``cos_dist`` / ``adjustment`` /
        # ``special_scores`` / ``is_special_care`` / ``special_adjustment`` /
        # ``concept_scores`` / ``has_nsfw_concepts`` -- as written every call
        # raises NameError until the original locals are restored.
        UpperCAmelCase = self.vision_model(lowercase_ )[1]
        UpperCAmelCase = self.visual_projection(lowercase_ )
        UpperCAmelCase = jax_cosine_distance(lowercase_ , self.special_care_embeds )
        UpperCAmelCase = jax_cosine_distance(lowercase_ , self.concept_embeds )
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        UpperCAmelCase = 0.0
        UpperCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        UpperCAmelCase = jnp.round(lowercase_ , 3 )
        UpperCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_ )
        # Use a lower threshold if an image has any special care concept
        UpperCAmelCase = is_special_care * 0.01
        UpperCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        UpperCAmelCase = jnp.round(lowercase_ , 3 )
        UpperCAmelCase = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Flax pretrained-model wrapper around the safety-checker module.

    NOTE(review): ``__init__`` declares five parameters all named
    ``lowercase_`` -- duplicate argument names are a SyntaxError -- and
    forwards an undefined ``_do_init``; the three class fields below were all
    renamed to ``__UpperCamelCase`` (each assignment clobbers the previous;
    presumably ``config_class`` / ``main_input_name`` / ``module_class``),
    and ``FlaxStableDiffusionSafetyCheckerModule`` is not defined in this
    file (the module class above was renamed ``A_``).  Restore from upstream.
    """

    __UpperCamelCase = CLIPConfig
    __UpperCamelCase = """clip_input"""
    __UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule

    def __init__( self :Optional[Any] , lowercase_ :CLIPConfig , lowercase_ :Optional[Tuple] = None , lowercase_ :int = 0 , lowercase_ :jnp.dtype = jnp.floataa , lowercase_ :bool = True , **lowercase_ :List[Any] , ) -> Optional[Any]:
        # Default to a single 224x224 RGB image (NHWC) when no shape given.
        # NOTE(review): ``input_shape`` is read but never bound here.
        if input_shape is None:
            UpperCAmelCase = (1, 2_24, 2_24, 3)
        UpperCAmelCase = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_ )
        super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init )

    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :jax.random.KeyArray , lowercase_ :Tuple , lowercase_ :FrozenDict = None ) -> FrozenDict:
        # Initialise the module's parameters from a random pixel tensor.
        # NOTE(review): ``params_rng`` / ``dropout_rng`` / ``random_params``
        # are read below but their bindings were obfuscated away (NameError).
        # init input tensor
        UpperCAmelCase = jax.random.normal(lowercase_ , lowercase_ )
        UpperCAmelCase , UpperCAmelCase = jax.random.split(lowercase_ )
        UpperCAmelCase = {'params': params_rng, 'dropout': dropout_rng}
        UpperCAmelCase = self.module.init(lowercase_ , lowercase_ )['params']
        return random_params

    def __call__( self :List[Any] , lowercase_ :Tuple , lowercase_ :dict = None , ) -> Optional[int]:
        # NCHW -> NHWC: the Flax CLIP vision tower expects channels-last.
        UpperCAmelCase = jnp.transpose(lowercase_ , (0, 2, 3, 1) )
        return self.module.apply(
            {'params': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa ) , rngs={} , )
| 78 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ):
UpperCAmelCase = []
UpperCAmelCase = 0
for index, char in enumerate(lowercase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase = index + 1
elif index + 1 == len(lowercase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
# Module logger.
snake_case_ = logging.get_logger(__name__)
# NOTE(review): every constant below was renamed to ``snake_case_``, so each
# assignment clobbers the previous one, while the tokenizer class reads the
# conventional names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_INIT_CONFIGURATION) --
# restore the original constant names.
# Expected tokenizer resource file names.
snake_case_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Hub URLs for each pretrained checkpoint's vocab / tokenizer files.
snake_case_ = {
    """vocab_file""": {
        """yjernite/retribert-base-uncased""": (
            """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """yjernite/retribert-base-uncased""": (
            """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum model input length per checkpoint.
snake_case_ = {
    """yjernite/retribert-base-uncased""": 512,
}
# Per-checkpoint tokenizer constructor defaults.
snake_case_ = {
    """yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Fast (``tokenizers``-backed) tokenizer for RetriBERT, BERT-style wordpiece.

    NOTE(review): obfuscation artifacts to restore before use: ``__init__``
    and the first post-processing method declare several parameters all named
    ``lowercase_`` (duplicate argument names are a SyntaxError), and the
    special-token builder concatenates ``token_ids_a`` for both segments
    where the second run was presumably ``token_ids_1`` -- confirm upstream.
    """

    __UpperCamelCase = VOCAB_FILES_NAMES
    __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
    __UpperCamelCase = RetriBertTokenizer
    __UpperCamelCase = ["""input_ids""", """attention_mask"""]

    def __init__( self :str , lowercase_ :List[str]=None , lowercase_ :List[str]=None , lowercase_ :str=True , lowercase_ :List[str]="[UNK]" , lowercase_ :Optional[Any]="[SEP]" , lowercase_ :Tuple="[PAD]" , lowercase_ :int="[CLS]" , lowercase_ :Optional[Any]="[MASK]" , lowercase_ :str=True , lowercase_ :Dict=None , **lowercase_ :List[Any] , ) -> Union[str, Any]:
        super().__init__(
            lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , )
        # Keep the Rust backend normalizer in sync with the Python options.
        # NOTE(review): ``normalizer_state`` / ``do_lower_case`` /
        # ``strip_accents`` / ``tokenize_chinese_chars`` /
        # ``normalizer_class`` are read below but their bindings were
        # obfuscated away (NameError as written).
        UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , lowercase_ ) != do_lower_case
            or normalizer_state.get('strip_accents' , lowercase_ ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , lowercase_ ) != tokenize_chinese_chars
        ):
            UpperCAmelCase = getattr(lowercase_ , normalizer_state.pop('type' ) )
            UpperCAmelCase = do_lower_case
            UpperCAmelCase = strip_accents
            UpperCAmelCase = tokenize_chinese_chars
            UpperCAmelCase = normalizer_class(**lowercase_ )
        UpperCAmelCase = do_lower_case

    def UpperCAmelCase__ ( self :Any , lowercase_ :Any , lowercase_ :Tuple=None ) -> Union[str, Any]:
        # Build a `[CLS] A [SEP] (B [SEP])` token sequence.
        UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
        # Token-type ids: 0 for the first segment (incl. CLS/SEP), 1 for the second.
        UpperCAmelCase = [self.sep_token_id]
        UpperCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str , lowercase_ :Optional[str] = None ) -> Tuple[str]:
        # Persist the wordpiece vocabulary; returns the written file path(s).
        UpperCAmelCase = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
        return tuple(lowercase_ )
| 78 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
snake_case_ = logging.getLogger(__name__)
def _lowerCAmelCase ( lowercase_ ):
    """Record the current git state (id, commit sha, branch) as
    ``git_log.json`` under the directory *lowercase_*.

    Fix applied: ``json.dump`` was called as ``json.dump(lowercase_,
    lowercase_)`` -- dumping the path argument onto itself -- instead of
    writing the collected repo-info dict to the opened file handle.
    """
    # NOTE(review): ``search_parent_directories`` receives the dump path (a
    # truthy string); presumably ``True`` was intended -- confirm upstream.
    UpperCAmelCase = git.Repo(search_parent_directories=lowercase_ )
    repo_infos = {
        'repo_id': str(lowercase_ ),
        'repo_sha': str(UpperCAmelCase.head.object.hexsha ),
        'repo_branch': str(UpperCAmelCase.active_branch ),
    }
    with open(os.path.join(lowercase_ , 'git_log.json' ) , 'w' ) as f:
        json.dump(repo_infos , f , indent=4 )
def _lowerCAmelCase ( lowercase_ ):
    """Configure single-/multi-GPU (and multi-node) settings on a params object
    and initialise ``torch.distributed`` when running multi-GPU.

    NOTE(review): the body reads an undefined global ``params`` -- the
    parameter was renamed to ``lowercase_`` -- and every attribute assignment
    was collapsed to a throwaway local ``UpperCAmelCase``, so the original
    attribute targets (world size, ranks, flags, ...) are lost; ``PREFIX`` is
    likewise read but never bound.  As written every call raises NameError;
    restore from the upstream distillation utils before running.
    """
    # CPU-only run: mark this process as the (only) master and bail out early.
    if params.n_gpu <= 0:
        UpperCAmelCase = 0
        UpperCAmelCase = -1
        UpperCAmelCase = True
        UpperCAmelCase = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs' )
    # Multi-GPU job: ranks and world size come from torch.distributed env vars.
    if params.n_gpu > 1:
        assert params.local_rank != -1
        UpperCAmelCase = int(os.environ['WORLD_SIZE'] )
        UpperCAmelCase = int(os.environ['N_GPU_NODE'] )
        UpperCAmelCase = int(os.environ['RANK'] )
        # number of nodes / node ID
        UpperCAmelCase = params.world_size // params.n_gpu_per_node
        UpperCAmelCase = params.global_rank // params.n_gpu_per_node
        UpperCAmelCase = True
        assert params.n_nodes == int(os.environ['N_NODES'] )
        assert params.node_id == int(os.environ['NODE_RANK'] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        UpperCAmelCase = 1
        UpperCAmelCase = 0
        UpperCAmelCase = 0
        UpperCAmelCase = 0
        UpperCAmelCase = 1
        UpperCAmelCase = 1
        UpperCAmelCase = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    UpperCAmelCase = params.node_id == 0 and params.local_rank == 0
    UpperCAmelCase = params.n_nodes > 1
    # summary
    UpperCAmelCase = F"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
    logger.info(PREFIX + 'Node ID : %i' % params.node_id )
    logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
    logger.info(PREFIX + 'World size : %i' % params.world_size )
    logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
    logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
    logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
    logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
    logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
    # set GPU device
    torch.cuda.set_device(params.local_rank )
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed' )
        torch.distributed.init_process_group(
            init_method='env://' , backend='nccl' , )
def _lowerCAmelCase ( lowercase_ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 78 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
# NOTE(review): both module-level names below were obfuscated to
# ``snake_case_`` (the second assignment clobbers the first); the decorator
# on the model class later reads ``_CONFIG_FOR_DOC`` -- restore the original
# names (pretrained archive list and _CONFIG_FOR_DOC).
snake_case_ = [
    """openmmlab/upernet-convnext-tiny""",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
snake_case_ = """UperNetConfig"""
class A_ ( nn.Module ):
    """Convolution building block used throughout the UperNet heads:
    conv -> batch norm -> ReLU.

    Fixes applied:
    * all six ``__init__`` parameters were named ``lowercase_`` (duplicate
      argument names are a SyntaxError); they are renamed after the keyword
      arguments they are forwarded to;
    * the three sub-modules were assigned to a throwaway name although
      ``forward`` reads ``self.conv`` / ``self.batch_norm`` /
      ``self.activation``, and the forward pass re-fed the raw input to
      every stage instead of chaining the intermediate results.
    """

    def __init__( self :Tuple , in_channels :int , out_channels :int , kernel_size :Union[int, Tuple[int, int]] , padding :Union[int, Tuple[int, int], str] = 0 , bias :bool = False , dilation :Union[int, Tuple[int, int]] = 1 , ) -> None:
        super().__init__()
        # NOTE(review): ``nn.Convad`` / ``nn.BatchNormad`` do not exist in
        # torch.nn -- presumably ``nn.Conv2d`` / ``nn.BatchNorm2d`` were
        # meant; confirm against the upstream transformers implementation.
        self.conv = nn.Convad(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        # Batch norm over the conv output channels.
        self.batch_norm = nn.BatchNormad(out_channels )
        self.activation = nn.ReLU()

    def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :torch.Tensor ) -> torch.Tensor:
        """Apply conv, then batch norm, then ReLU to *lowercase_*."""
        output = self.conv(lowercase_ )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output
class A_ ( nn.Module ):
    """One branch of the PSP pyramid: adaptive average pooling to a fixed
    ``pool_scale`` followed by a 1x1 conv module.

    Fixes applied:
    * the three ``__init__`` parameters were all named ``lowercase_``
      (duplicate argument names are a SyntaxError); they are renamed to
      match the keyword names used at the call site in the pyramid pooling
      module (``pool_scale`` / ``in_channels`` / ``channels``);
    * ``self.layers`` was assigned to a throwaway name although the next
      line and ``forward`` iterate ``self.layers``;
    * ``forward`` bound the ``input`` builtin instead of its argument and
      never chained the layer outputs.
    """

    def __init__( self :Optional[Any] , pool_scale :int , in_channels :int , channels :int ) -> None:
        super().__init__()
        # NOTE(review): ``nn.AdaptiveAvgPoolad`` does not exist in torch.nn
        # -- presumably ``nn.AdaptiveAvgPool2d``; confirm upstream.
        self.layers = [
            nn.AdaptiveAvgPoolad(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        # Register the plain-list entries as named sub-modules.
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )

    def UpperCAmelCase__ ( self :List[str] , lowercase_ :torch.Tensor ) -> torch.Tensor:
        """Run the input through the pooling stage then the 1x1 conv module."""
        hidden_state = lowercase_
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class A_ ( nn.Module ):
    """Pyramid Pooling Module (PSP): pools the feature map at several scales,
    runs each through a block, and upsamples every result back to the input's
    spatial size.

    NOTE(review): all four ``__init__`` parameters were renamed to
    ``lowercase_`` (duplicate argument names are a SyntaxError), and every
    attribute assignment was collapsed to a throwaway ``UpperCAmelCase``
    while later lines read ``pool_scales`` / ``align_corners`` /
    ``in_channels`` / ``channels`` / ``self.blocks`` / ``x`` / ``ppm_outs``
    -- restore the original bindings from upstream before use.
    """

    def __init__( self :Optional[Any] , lowercase_ :Tuple[int, ...] , lowercase_ :int , lowercase_ :int , lowercase_ :bool ) -> None:
        super().__init__()
        UpperCAmelCase = pool_scales
        UpperCAmelCase = align_corners
        UpperCAmelCase = in_channels
        UpperCAmelCase = channels
        UpperCAmelCase = []
        # One pooling block per pyramid scale, registered as a sub-module.
        for i, pool_scale in enumerate(lowercase_ ):
            UpperCAmelCase = UperNetPyramidPoolingBlock(pool_scale=lowercase_ , in_channels=lowercase_ , channels=lowercase_ )
            self.blocks.append(lowercase_ )
            self.add_module(str(lowercase_ ) , lowercase_ )

    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :torch.Tensor ) -> List[torch.Tensor]:
        # Apply every pyramid block and bilinearly upsample each output back
        # to the input's height/width.
        UpperCAmelCase = []
        for ppm in self.blocks:
            UpperCAmelCase = ppm(lowercase_ )
            UpperCAmelCase = nn.functional.interpolate(
                lowercase_ , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
            ppm_outs.append(lowercase_ )
        return ppm_outs
class A_ ( nn.Module ):
    """UperNet decode head: a PSP module on the deepest backbone feature plus
    an FPN over the shallower ones, fused and classified per pixel.

    NOTE(review): every attribute assignment in ``__init__`` was collapsed to
    a throwaway ``UpperCAmelCase`` while later lines read ``self.config`` /
    ``self.pool_scales`` / ``self.in_channels`` / ``self.channels`` /
    ``self.align_corners`` / ``self.classifier`` / ``self.psp_modules`` /
    ``self.bottleneck`` / ``self.lateral_convs`` / ``self.fpn_convs`` /
    ``self.fpn_bottleneck``; ``in_channels`` is also read before any binding.
    Forward-path locals (``laterals``, ``fpn_outs``, ...) are similarly lost.
    Restore from the upstream transformers UperNet implementation.
    """

    def __init__( self :Dict , lowercase_ :Optional[Any] , lowercase_ :Optional[int] ) -> Any:
        super().__init__()
        UpperCAmelCase = config
        UpperCAmelCase = config.pool_scales # e.g. (1, 2, 3, 6)
        UpperCAmelCase = in_channels
        UpperCAmelCase = config.hidden_size
        UpperCAmelCase = False
        UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        UpperCAmelCase = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        UpperCAmelCase = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        UpperCAmelCase = nn.ModuleList()
        UpperCAmelCase = nn.ModuleList()
        for in_channels in self.in_channels[:-1]: # skip the top layer
            UpperCAmelCase = UperNetConvModule(lowercase_ , self.channels , kernel_size=1 )
            UpperCAmelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(lowercase_ )
            self.fpn_convs.append(lowercase_ )
        UpperCAmelCase = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )

    def UpperCAmelCase__ ( self :Optional[Any] ) -> List[Any]:
        # Re-initialise all conv weights via the visitor below.
        self.apply(self._init_weights )

    def UpperCAmelCase__ ( self :str , lowercase_ :Union[str, Any] ) -> str:
        # Weight-init visitor.  NOTE(review): ``module`` is read but the
        # parameter is named ``lowercase_`` -- NameError as written.
        if isinstance(lowercase_ , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    def UpperCAmelCase__ ( self :Dict , lowercase_ :int ) -> int:
        # PSP forward: deepest feature + multi-scale pooled features,
        # concatenated channel-wise and fused by the bottleneck conv.
        UpperCAmelCase = inputs[-1]
        UpperCAmelCase = [x]
        psp_outs.extend(self.psp_modules(lowercase_ ) )
        UpperCAmelCase = torch.cat(lowercase_ , dim=1 )
        UpperCAmelCase = self.bottleneck(lowercase_ )
        return output

    def UpperCAmelCase__ ( self :str , lowercase_ :torch.Tensor ) -> torch.Tensor:
        # build laterals
        UpperCAmelCase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(lowercase_ ) )
        # build top-down path
        UpperCAmelCase = len(lowercase_ )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            UpperCAmelCase = laterals[i - 1].shape[2:]
            UpperCAmelCase = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=lowercase_ , mode='bilinear' , align_corners=self.align_corners )
        # build outputs
        UpperCAmelCase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            UpperCAmelCase = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
        UpperCAmelCase = torch.cat(lowercase_ , dim=1 )
        UpperCAmelCase = self.fpn_bottleneck(lowercase_ )
        UpperCAmelCase = self.classifier(lowercase_ )
        return output
class A_ ( nn.Module ):
    """Auxiliary FCN head (a stack of 3x3 conv modules plus a 1x1 classifier)
    applied to one intermediate backbone feature map.

    NOTE(review): the four ``__init__`` parameters were all renamed to
    ``lowercase_`` (duplicate argument names are a SyntaxError), and every
    attribute assignment was collapsed to a throwaway ``UpperCAmelCase``
    while later lines read ``self.in_channels`` / ``self.channels`` /
    ``self.num_convs`` / ``self.concat_input`` / ``self.in_index`` /
    ``self.convs`` / ``self.conv_cat`` / ``self.classifier``; the locals
    ``in_index`` / ``kernel_size`` / ``dilation`` / ``convs`` are read but
    never bound.  Restore from upstream before use.
    """

    def __init__( self :Optional[Any] , lowercase_ :Optional[Any] , lowercase_ :int = 2 , lowercase_ :int = 3 , lowercase_ :Union[int, Tuple[int, int]] = 1 ) -> None:
        super().__init__()
        UpperCAmelCase = config
        UpperCAmelCase = config.auxiliary_in_channels
        UpperCAmelCase = config.auxiliary_channels
        UpperCAmelCase = config.auxiliary_num_convs
        UpperCAmelCase = config.auxiliary_concat_input
        UpperCAmelCase = in_index
        UpperCAmelCase = (kernel_size // 2) * dilation
        UpperCAmelCase = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_ ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_ ) )
        if self.num_convs == 0:
            UpperCAmelCase = nn.Identity()
        else:
            UpperCAmelCase = nn.Sequential(*lowercase_ )
        if self.concat_input:
            UpperCAmelCase = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=lowercase_ , padding=kernel_size // 2 )
        UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )

    def UpperCAmelCase__ ( self :List[str] ) -> Dict:
        # Re-initialise all conv weights via the visitor below.
        self.apply(self._init_weights )

    def UpperCAmelCase__ ( self :int , lowercase_ :Any ) -> List[Any]:
        # Weight-init visitor.  NOTE(review): ``module`` is read but the
        # parameter is named ``lowercase_`` -- NameError as written.
        if isinstance(lowercase_ , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    def UpperCAmelCase__ ( self :Dict , lowercase_ :torch.Tensor ) -> torch.Tensor:
        # just take the relevant feature maps
        UpperCAmelCase = encoder_hidden_states[self.in_index]
        UpperCAmelCase = self.convs(lowercase_ )
        if self.concat_input:
            UpperCAmelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        UpperCAmelCase = self.classifier(lowercase_ )
        return output
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Abstract base handling weight init and gradient checkpointing for
    UperNet models.

    NOTE(review): the three class fields below were all renamed to
    ``__UpperCamelCase`` (each assignment clobbers the previous; presumably
    ``config_class`` / ``main_input_name`` / ``supports_gradient_checkpointing``),
    and the method bodies read ``module`` / ``value`` although the parameters
    are named ``lowercase_`` -- NameError as written; the degenerate
    ``isinstance(lowercase_, lowercase_)`` checks also need their original
    second arguments restored.
    """

    __UpperCamelCase = UperNetConfig
    __UpperCamelCase = """pixel_values"""
    __UpperCamelCase = True

    def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[str] ) -> Union[str, Any]:
        # Per-module weight init: delegate to each sub-model's own init.
        if isinstance(lowercase_ , lowercase_ ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def UpperCAmelCase__ ( self :int ) -> Tuple:
        # Initialise backbone + both heads in place.
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def UpperCAmelCase__ ( self :Any , lowercase_ :Dict , lowercase_ :Union[str, Any]=False ) -> Optional[int]:
        # Toggle gradient checkpointing on backbone modules.
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = value
snake_case_ = R"""
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
snake_case_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , SCREAMING_SNAKE_CASE_ , )
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """UperNet semantic-segmentation model: an auto-resolved vision backbone,
    the UperNet decode head, and an optional FCN auxiliary head.

    NOTE(review): the five ``forward`` parameters are all named ``lowercase_``
    (duplicate argument names are a SyntaxError); the ``replace_return_docstrings``
    decorator reads undefined ``lowercase_`` / ``_CONFIG_FOR_DOC`` at class
    creation time; ``__init__``'s attribute targets (``self.backbone`` /
    ``self.decode_head`` / ``self.auxiliary_head``) and the forward-path
    locals were collapsed to throwaway names.  Restore from upstream.
    """

    def __init__( self :int , lowercase_ :Tuple ) -> int:
        super().__init__(lowercase_ )
        UpperCAmelCase = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        UpperCAmelCase = UperNetHead(lowercase_ , in_channels=self.backbone.channels )
        UpperCAmelCase = UperNetFCNHead(lowercase_ ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
    @replace_return_docstrings(output_type=lowercase_ , config_class=_CONFIG_FOR_DOC )
    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[torch.Tensor] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[torch.Tensor] = None , lowercase_ :Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        # Resolve output-format flags from the config when not given.
        UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCAmelCase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
        # Backbone features -> decode head -> upsample logits to input size.
        UpperCAmelCase = self.backbone.forward_with_filtered_kwargs(
            lowercase_ , output_hidden_states=lowercase_ , output_attentions=lowercase_ )
        UpperCAmelCase = outputs.feature_maps
        UpperCAmelCase = self.decode_head(lowercase_ )
        UpperCAmelCase = nn.functional.interpolate(lowercase_ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=lowercase_ )
        UpperCAmelCase = None
        if self.auxiliary_head is not None:
            UpperCAmelCase = self.auxiliary_head(lowercase_ )
            UpperCAmelCase = nn.functional.interpolate(
                lowercase_ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=lowercase_ )
        UpperCAmelCase = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('The number of labels should be greater than one' )
            else:
                # compute weighted loss
                # NOTE(review): both losses are computed from the same
                # obfuscated arguments; presumably main loss uses
                # (logits, labels) and auxiliary loss (auxiliary_logits,
                # labels) -- confirm upstream.
                UpperCAmelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                UpperCAmelCase = loss_fct(lowercase_ , lowercase_ )
                UpperCAmelCase = loss_fct(lowercase_ , lowercase_ )
                UpperCAmelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                UpperCAmelCase = (logits,) + outputs[1:]
            else:
                UpperCAmelCase = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 78 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# Micro-benchmark: run a BERT-style ONNX model 2000 times and report the
# average per-iteration latency.
# NOTE(review): every assignment target in this script was renamed to
# ``snake_case_`` (each clobbers the previous), while later lines read the
# original names (``sess_opt``, ``execution_provider``, ``sess``,
# ``run_opt``, ``batch``, ``sequence``, ``input_ids``, ``attention_mask``,
# ``token_type_ids``, ``start_time``, ``max_iters``) -- as written the
# script raises NameError.  The three leading "1"/"0"/"1" strings were
# presumably os.environ settings (thread/affinity env vars) and
# ``np.intaa`` presumably ``np.int64`` -- confirm against the original.
snake_case_ = """1"""
snake_case_ = """0"""
snake_case_ = """1"""
snake_case_ = ort.SessionOptions()
# Disable graph optimisations so raw kernel time is measured.
snake_case_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
snake_case_ = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
snake_case_ = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
snake_case_ = ort.RunOptions()
snake_case_ = 128
snake_case_ = 1
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
print("""Warm up phase...""")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print("""Start inference...""")
snake_case_ = time.time()
snake_case_ = 2000
snake_case_ = {}
# NOTE(review): ``iter`` shadows the builtin; the per-iteration result is
# also discarded (re-assigned each pass) -- rename the loop variable.
for iter in range(max_iters):
    snake_case_ = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
| 78 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the MRPC fine-tuning example.
# NOTE(review): both were renamed to ``snake_case_`` (the second clobbers
# the first); presumably MAX_GPU_BATCH_SIZE / EVAL_BATCH_SIZE originally.
snake_case_ = 16
snake_case_ = 32
def _lowerCAmelCase ( lowercase_ , lowercase_ = 16 , lowercase_ = "bert-base-cased" ):
    """Build train/eval dataloaders for GLUE MRPC tokenized with the given
    model's tokenizer.

    NOTE(review): the three parameters are all named ``lowercase_`` --
    duplicate argument names are a SyntaxError -- and the body reads names
    whose bindings were renamed away: the ``load_dataset`` result vs the
    ``datasets.map`` call, ``tokenizer`` / ``examples`` / ``outputs`` in the
    tokenize closure, the undefined ``accelerator`` in ``collate_fn``, and
    ``tokenized_datasets`` / ``train_dataloader`` / ``eval_dataloader``.
    Presumably the original signature was
    ``(accelerator, batch_size=16, model_name="bert-base-cased")`` -- confirm
    against the upstream accelerate example.
    """
    UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
    UpperCAmelCase = load_dataset('glue' , 'mrpc' )

    def tokenize_function(lowercase_ ):
        # max_length=None => use the model max length (it's actually the default)
        UpperCAmelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase_ , max_length=lowercase_ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    UpperCAmelCase = datasets.map(
        lowercase_ , batched=lowercase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=lowercase_ )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    UpperCAmelCase = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(lowercase_ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(lowercase_ , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(lowercase_ , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    UpperCAmelCase = DataLoader(
        tokenized_datasets['train'] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
    UpperCAmelCase = DataLoader(
        tokenized_datasets['validation'] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
    return train_dataloader, eval_dataloader
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
    """Fine-tune a sequence classifier on MRPC under accelerate (optionally
    DeepSpeed), tracking and persisting the best eval accuracy.

    NOTE(review): both parameters are named ``lowercase_`` (duplicate
    argument names are a SyntaxError) -- presumably ``(config, args)`` given
    the reads of ``config['lr']`` and ``args.model_name_or_path`` below.
    Every local assignment was also renamed to ``UpperCAmelCase`` while
    later lines read the original names (``accelerator``, ``model``,
    ``optimizer``, ``lr_scheduler``, ``metric``, ``eval_metric``,
    ``best_performance``, ...), so the body needs the same de-obfuscation
    pass; as written it raises NameError immediately.
    """
    # Initialize accelerator
    UpperCAmelCase = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    UpperCAmelCase = config['lr']
    UpperCAmelCase = int(config['num_epochs'] )
    UpperCAmelCase = int(config['seed'] )
    UpperCAmelCase = int(config['batch_size'] )
    UpperCAmelCase = args.model_name_or_path
    set_seed(lowercase_ )
    UpperCAmelCase , UpperCAmelCase = get_dataloaders(lowercase_ , lowercase_ , lowercase_ )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(lowercase_ , return_dict=lowercase_ )
    # Instantiate optimizer
    UpperCAmelCase = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    UpperCAmelCase = optimizer_cls(params=model.parameters() , lr=lowercase_ )
    # Gradient accumulation comes from the DeepSpeed config when present.
    if accelerator.state.deepspeed_plugin is not None:
        UpperCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        UpperCAmelCase = 1
    UpperCAmelCase = (len(lowercase_ ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        UpperCAmelCase = get_linear_schedule_with_warmup(
            optimizer=lowercase_ , num_warmup_steps=0 , num_training_steps=lowercase_ , )
    else:
        UpperCAmelCase = DummyScheduler(lowercase_ , total_num_steps=lowercase_ , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
    # We need to keep track of how many total steps we have iterated over
    UpperCAmelCase = 0
    # We also need to keep track of the stating epoch so files are named properly
    UpperCAmelCase = 0
    # Now we train the model
    UpperCAmelCase = evaluate.load('glue' , 'mrpc' )
    UpperCAmelCase = 0
    UpperCAmelCase = {}
    for epoch in range(lowercase_ , lowercase_ ):
        model.train()
        for step, batch in enumerate(lowercase_ ):
            UpperCAmelCase = model(**lowercase_ )
            UpperCAmelCase = outputs.loss
            UpperCAmelCase = loss / gradient_accumulation_steps
            accelerator.backward(lowercase_ )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1
        model.eval()
        UpperCAmelCase = 0
        for step, batch in enumerate(lowercase_ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                UpperCAmelCase = model(**lowercase_ )
            UpperCAmelCase = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            UpperCAmelCase , UpperCAmelCase = accelerator.gather(
                (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(lowercase_ ) - 1:
                    UpperCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    UpperCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=lowercase_ , references=lowercase_ , )
        UpperCAmelCase = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"""epoch {epoch}:""" , lowercase_ )
        UpperCAmelCase = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            UpperCAmelCase = eval_metric['accuracy']
    # Optional lower-bound guard on the best metric seen across epochs.
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
            json.dump(lowercase_ , lowercase_ )
def main():
    """Parse command-line arguments and launch the MRPC training run.

    Fixes over the obfuscated original: the parser/args/config values were
    bound to throw-away names and the function was unreachable from the
    ``main()`` call in the script guard.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=False,
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--performance_lower_bound',
        type=float,
        default=None,
        help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=3,
        help='Number of train epochs.',
    )
    args = parser.parse_args()
    # Fixed hyper-parameters except for the epoch count, which is CLI-driven.
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
    # Script entry point (body was unindented in the original, a SyntaxError).
    main()
| 78 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    # PIL is only needed (and only guaranteed installed) when vision extras
    # are available; the original guard body was unindented (SyntaxError).
    import PIL
snake_case_ = logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """BLIP-style image processor.

    Pipeline: optional RGB conversion -> resize -> rescale -> normalize ->
    channel-format conversion, returning a ``BatchFeature`` of pixel values.

    Fixes over the obfuscated original: every parameter was named
    ``lowercase_`` (duplicate parameter names are a SyntaxError), ``__init__``
    bound the options to locals instead of instance attributes, all helper
    methods shared one name (shadowing each other while ``preprocess`` calls
    ``self.resize``/``self.rescale``/``self.normalize``), and the
    ``do_resize`` validation had an operator-precedence bug.
    """

    __UpperCamelCase = ["""pixel_values"""]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 384, 'width': 384}
        # assumes default_to_square=True matches the original call — the
        # obfuscation replaced the keyword value with a placeholder; confirm.
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (must contain ``height`` and ``width``)."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize ``image`` with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full preprocessing pipeline over one image or a list of images.

        Per-call arguments override the defaults stored on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # Parenthesised fix: the original `a and b or c` bound as `(a and b) or c`.
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        return encoded_outputs

    # Backward-compat alias: the obfuscated source exposed every method under
    # this one name; the last definition (the public entry point) won.
    UpperCAmelCase__ = preprocess
| 78 | 1 |
"""simple docstring"""
def solution() -> int:
    """Project Euler 40: product of the digits d1*d10*d100*...*d1000000 of
    Champernowne's constant 0.123456789101112...

    Fixes over the obfuscated original: the loop referenced undefined names
    and the script guard calls ``solution()``, which did not exist. The loop
    now tracks the built character count directly instead of the chunk count,
    so it stops as soon as one million digits are available.
    """
    chunks = []
    total_length = 0
    index = 1
    while total_length < 1_000_000:
        piece = str(index)
        chunks.append(piece)
        total_length += len(piece)
        index += 1
    constant = ''.join(chunks)
    # constant[k] is the (k+1)-th digit of the fractional part.
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
    # Print the Project Euler answer when run as a script (body was
    # unindented in the original, a SyntaxError).
    print(solution())
| 78 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the obfuscation reuses one name for both the logger and the
# archive map, so the logger binding below is immediately clobbered — confirm
# the intended distinct names before relying on either binding.
snake_case_ = logging.get_logger(__name__)
# Pretrained checkpoint name -> hosted config URL.
snake_case_ = {
    """microsoft/beit-base-patch16-224-pt22k""": (
        """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration for a BEiT model (backbone, masked-image modelling and
    semantic-segmentation heads).

    Fixes over the obfuscated original: every ``__init__`` parameter was named
    ``lowercase_`` (duplicate parameter names are a SyntaxError) and the values
    were bound to throw-away locals instead of instance attributes. Parameter
    names are reconstructed from the right-hand sides of those assignments.
    """

    __UpperCamelCase = """beit"""

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for BEiT-style vision models.

    Fixes over the obfuscated original: both properties shared one name, so
    the tolerance property silently shadowed the inputs property.
    """

    __UpperCamelCase = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Single image input, dynamic on every axis."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating exported outputs."""
        return 1e-4

    # Backward-compat alias: the last definition previously won this name.
    UpperCAmelCase__ = atol_for_validation
| 78 | 1 |
"""simple docstring"""
class A_ :
    """Fenwick (binary indexed) tree over a fixed-size array supporting point
    assignment and range-maximum queries.

    ``update`` runs in O(log^2 n) (it re-queries each covering node's range);
    ``query`` runs in O(log n). Values are assumed non-negative (the query
    accumulator starts at 0).

    Fixes over the obfuscated original: ``__init__`` bound locals instead of
    ``self.size``/``self.arr``/``self.tree``; all four methods shared one name
    while the code calls ``self.get_prev``/``self.get_next``; and the interior
    node update mixed indices into the ``max`` of values — it now recomputes
    the true maximum of the node's range.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size    # raw values
        self.tree = [0] * size   # tree[i] = max(arr[get_prev(i)+1 .. i])

    @staticmethod
    def get_next(index: int) -> int:
        # Next node whose range covers `index` (sets the lowest unset bit).
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        # Last index *before* the range covered by node `index`.
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set ``arr[index] = value`` and repair every covering tree node."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # Node covers exactly one element.
                self.tree[index] = value
            else:
                # Recompute the maximum over [current_left_border, index];
                # nodes strictly below `index` are still valid here.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return ``max(arr[left:right])`` (``right`` exclusive)."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # Whole node range fits inside the query: use the cached max.
                result = max(result, self.tree[right])
                right = current_left
            else:
                # Partial overlap: take the single element and step left.
                result = max(result, self.arr[right])
                right -= 1
        return result

    # Backward-compat alias: the obfuscated source exposed the last-defined
    # method under this single colliding name.
    UpperCAmelCase__ = query
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script (the guard body was
    # unindented in the original, a SyntaxError).
    import doctest

    doctest.testmod()
| 78 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case_ = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class A_ ( unittest.TestCase ):
    """Unit tests for the Vector / Matrix linear-algebra helpers in ``.lib``.

    Fixes over the obfuscated original: every test method shared one
    non-``test_``-prefixed name, so unittest discovered zero tests; and one
    assertion passed a tolerance as ``assertEqual``'s ``msg`` argument.
    """

    def test_component(self) -> None:
        """component() returns the entry stored at the given index."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # constructing an empty vector must not raise

    def test_str(self) -> None:
        """Vectors render as a parenthesised, comma-separated tuple."""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """Scalar multiplication scales; vector * vector is the dot product."""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count('0'), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')

    def test_axpy(self) -> None:
        """axpy(a, x, y) computes a*x + y."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        """Matrix * vector and matrix * scalar."""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # Fixed: the original passed 0.01 as assertEqual's (ignored) ``msg``
        # argument; a numeric tolerance belongs in assertAlmostEqual's delta.
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n',
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
    # Run the test suite when executed as a script (the guard body was
    # unindented in the original, a SyntaxError).
    unittest.main()
| 78 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """Extract the pet-breed label from a file name like ``.../beagle_32.jpg``.

    The label is everything before the final ``_<number>.jpg`` suffix of the
    basename. Named ``extract_label`` to match the call sites below (the
    obfuscated original's def name and parameter reference were broken).
    """
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'^(.*)_\d+\.jpg$', stem).groups()[0]
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Dataset of pet images: loads a file as RGB, applies an optional
    transform, and derives the label from the file name.

    Fix over the obfuscated original: ``__init__`` bound its arguments to
    throw-away locals, while ``__len__``/``__getitem__`` read them off
    ``self``.
    """

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names          # list of image paths
        self.image_transform = image_transform  # optional callable(PIL) -> tensor
        self.label_to_id = label_to_id        # optional label-string -> int map

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert('RGB')
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    """Fine-tune a frozen ResNet50d classifier head on the pets dataset with
    Accelerate, with optional tracking, checkpointing and resume support.

    Named ``training_function`` to match the call in ``main()``. The
    obfuscated original collapsed every local to one name (so ``lr``,
    ``num_epochs``, the dataloaders, etc. were undefined at their use sites)
    and lost the ``param.requires_grad`` assignments; real names are
    reconstructed from the right-hand sides and use sites.
    """
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='all', project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    image_size = config['image_size']
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, 'isdigit'):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.")
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split('.')[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith('.jpg')]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model('resnet50d', pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg['mean'])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg['std'])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace('epoch_', '')) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_', ''))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch['label'])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            # gather_for_metrics drops duplicated samples added by distributed samplers
            predictions, references = accelerator.gather_for_metrics((predictions, batch['label']))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': 100 * eval_metric,
                    'train_loss': total_loss.item() / len(train_dataloader),
                    'epoch': epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    """Parse command-line arguments and launch the CV training run.

    Fixes over the obfuscated original: parser/args/config were bound to
    throw-away names, argument types/defaults were placeholders, and the
    script guard's ``main()`` call had no target.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument('--data_dir', required=True, help='The data folder on disk.')
    parser.add_argument('--fp16', action='store_true', help='If passed, will use FP16 training.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    parser.add_argument(
        '--checkpointing_steps',
        type=str,
        default=None,
        help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.',
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--resume_from_checkpoint',
        type=str,
        default=None,
        help='If the training should continue from a checkpoint folder.',
    )
    parser.add_argument(
        '--with_tracking',
        action='store_true',
        help='Whether to load in all available experiment trackers from the environment and use them for logging.',
    )
    parser.add_argument(
        '--project_dir',
        type=str,
        default='logs',
        help='Location on where to store experiment tracking logs` and relevent project information',
    )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config, args)
if __name__ == "__main__":
    # Script entry point (body was unindented in the original, a SyntaxError).
    main()
| 78 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# Refuse to run against untested library versions: the conversion relies on
# gluonnlp 0.8.3 / mxnet 1.5.0 internals. (The raise bodies were unindented
# in the original, a SyntaxError.)
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
    raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
    raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
# NOTE(review): the same obfuscated name is reused for the logger and the
# sample sentence below, so the logger binding is clobbered immediately —
# confirm the intended distinct names.
snake_case_ = logging.get_logger(__name__)
snake_case_ = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def _lowerCAmelCase(bort_checkpoint_path, pytorch_dump_folder_path):
    """Convert the official Bort (GluonNLP/MXNet) checkpoint to a 🤗 Transformers
    PyTorch checkpoint, then sanity-check both models agree on a sample sentence.

    Args:
        bort_checkpoint_path: path to the official Bort `.params` file.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    # Hard-coded hyper-parameters of the released bort_4_8_768_1024 checkpoint.
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Gluon -> Transformers parameter mapping (applied below, * = layer index):
    #   encoder.layer_norm.{beta,gamma}            -> bert.embeddings.LayerNorm.{bias,weight}
    #   encoder.position_weight                    -> bert.embeddings.position_embeddings.weight
    #   word_embed.0.weight                        -> bert.embeddings.word_embeddings.weight
    #   ...transformer_cells.*.attention_cell.proj_{key,query,value}.{bias,weight}
    #                                              -> ...layer.*.attention.self.{key,query,value}.{bias,weight}
    #   ...transformer_cells.*.proj.{bias,weight}  -> ...layer.*.attention.output.dense.{bias,weight}
    #   ...transformer_cells.*.layer_norm.{beta,gamma}
    #                                              -> ...layer.*.attention.output.LayerNorm.{bias,weight}
    #   ...transformer_cells.*.ffn.ffn_1.{bias,weight} -> ...layer.*.intermediate.dense.{bias,weight}
    #   ...transformer_cells.*.ffn.ffn_2.{bias,weight} -> ...layer.*.output.dense.{bias,weight}
    #   ...transformer_cells.*.ffn.layer_norm.{beta,gamma} -> ...layer.*.output.LayerNorm.{bias,weight}

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight")
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight")
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta")
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma")
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""")
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""")
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""")
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""")
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""")
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""")
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"""encoder.transformer_cells.{i}.proj.bias""")
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"""encoder.transformer_cells.{i}.proj.weight""")
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.layer_norm.beta""")
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.layer_norm.gamma""")
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""")
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""")
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""")
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""")
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""")
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""")
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare output of both models
    # NOTE(review): the module-level sample-text constant is shadowed by later
    # reassignments of the same mangled name, so the sentence is inlined here.
    sample_text = "The Nymphenburg Palace is a beautiful palace in Munich!"
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    input_ids = tokenizer.encode_plus(sample_text)['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    encoded = tokenizer.encode_plus(sample_text, return_tensors='pt')
    output_hf = hf_bort_model(**encoded)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print('✔️ Both model do output the same tensors')
    else:
        print('❌ Both model do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)
if __name__ == "__main__":
    # CLI entry point: convert an official Bort checkpoint to a PyTorch dump.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    # The converter above is defined under the (mangled) name `_lowerCAmelCase`.
    _lowerCAmelCase(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 78 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = list(range(len(lowercase_ ) ) )
UpperCAmelCase = [v / w for v, w in zip(lowercase_ , lowercase_ )]
index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ )
UpperCAmelCase = 0
UpperCAmelCase = [0] * len(lowercase_ )
for i in index:
if weight[i] <= capacity:
UpperCAmelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
snake_case_ = parse(importlib.metadata.version("""torch"""))
def _lowerCAmelCase(library_or_version, operation, requirement_version):
    """Compare a library's installed version (or an already-parsed Version)
    against a requirement version.

    Args:
        library_or_version: a package name (its installed version is looked up
            via importlib.metadata) or a `packaging.version.Version`.
        operation: comparison key; must be one of STR_OPERATION_TO_FUNC's keys
            (imported from .constants), e.g. ">=", "<".
        requirement_version: version string to compare against.

    Raises:
        ValueError: if `operation` is not a known comparison key.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    comparator = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        # A package name was given: resolve it to its installed version.
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return comparator(library_or_version, parse(requirement_version))
def _lowerCAmelCase(operation, version):
    """Compare the currently-installed torch version against `version`.

    Args:
        operation: comparison key from STR_OPERATION_TO_FUNC (e.g. ">=").
        version: requirement version string.
    """
    # Inlined instead of delegating: in this module the general comparison
    # helper shares this function's (mangled) name, so a direct call would
    # recurse onto itself.
    comparator = STR_OPERATION_TO_FUNC[operation]
    return comparator(parse(importlib.metadata.version("torch")), parse(version))
| 78 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class A_(Pipeline):
    """Visual Question Answering pipeline: answers a free-text question about an
    image using a VQA model (PyTorch only).

    Hook-method names (`_sanitize_parameters`, `preprocess`, `_forward`,
    `postprocess`) follow the `Pipeline` base-class dispatch contract.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        # Route user kwargs to the preprocess/postprocess stages.
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """Answer `question` about `image`; also accepts a dict (or iterable of
        dicts) with "image" and "question" keys as the first argument."""
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'image': image, 'question': question}
        else:
            # Assume the caller passed pre-assembled input dict(s).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        # Tokenize the question and featurize the image, then merge both.
        image = load_image(inputs['image'])
        model_inputs = self.tokenizer(
            inputs['question'], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        # Clamp top_k to the number of labels the model actually has.
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            # Multi-label style scoring: sigmoid over logits, then take top-k.
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 78 | 1 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
snake_case_ = data_utils.TransfoXLTokenizer
snake_case_ = data_utils.TransfoXLCorpus
snake_case_ = data_utils
snake_case_ = data_utils
def _lowerCAmelCase(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
    """Convert a Transformer-XL TF checkpoint and/or a pre-processed corpus to PyTorch files.

    Args:
        tf_checkpoint_path: TensorFlow checkpoint to convert ("" skips this step).
        transfo_xl_config_file: JSON config path ("" -> default TransfoXLConfig).
        pytorch_dump_folder_path: output directory for the dumped files.
        transfo_xl_dataset_file: pickled corpus to convert ("" skips this step).
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, 'rb') as fp:
            corpus = pickle.load(fp, encoding='latin1')
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(F"""Save vocabulary to {pytorch_vocab_dump_path}""")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(F"""Save dataset to {pytorch_dataset_dump_path}""")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(F"""Building PyTorch model from configuration: {config}""")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""")
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: parse paths and run the conversion above.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
    )
    parser.add_argument(
        """--tf_checkpoint_path""",
        default="""""",
        type=str,
        help="""An optional path to a TensorFlow checkpoint path to be converted.""",
    )
    parser.add_argument(
        """--transfo_xl_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--transfo_xl_dataset_file""",
        default="""""",
        type=str,
        help="""An optional dataset file to be converted in a vocabulary.""",
    )
    args = parser.parse_args()
    # The converter is defined above under the (mangled) name `_lowerCAmelCase`.
    _lowerCAmelCase(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 78 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_(PretrainedConfig):
    """Configuration for a Transformer-XL model.

    Stores the architecture hyper-parameters (adaptive softmax cutoffs, model /
    embedding / head dimensions, memory length, etc.).  Attribute-name aliases
    follow the PretrainedConfig `attribute_map` convention.
    """

    model_type = """transfo-xl"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Tie all adaptive-softmax projections except the first cluster when requested.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
| 78 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
snake_case_ = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class A_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowercase_ :str = " " ) -> str:
UpperCAmelCase = sentence_delimiter
def UpperCAmelCase__ ( self :Any , lowercase_ :str ) -> Any:
return list(lowercase_ )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[str] ) -> Optional[int]:
UpperCAmelCase = []
for sent_idx, sentence in enumerate(lowercase_ ):
chars.extend(self.process_string(lowercase_ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase_ ) - 1:
chars.append(self.sentence_delimiter )
return chars
snake_case_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
snake_case_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
snake_case_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
snake_case_ = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
snake_case_ = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A_(datasets.Metric):
    """Character Error Rate (CER) metric: edit-distance errors at the character
    level, normalized by the reference length.

    Method names `_info`/`_compute` are the hooks `datasets.Metric` dispatches to.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
                'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        # NOTE(review): `cer_transform` is the char-level jiwer transform built at
        # module level; in this file that constant's binding looks mangled —
        # confirm the module-level name before relying on this.
        if concatenate_texts:
            # Score all sentences at once; jiwer reports the char-level result
            # under its "wer" key because of the char transform.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 78 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowerCAmelCase ( lowercase_ = "isbn/0140328726" ):
UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
UpperCAmelCase = F"""{olid} is not a valid Open Library olid"""
raise ValueError(lowercase_ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def _lowerCAmelCase(ol_book_data):
    """Reduce an Open Library book record to a human-readable summary dict.

    Renames the interesting keys, resolves author references to names, extracts
    the first sentence's text, and joins any remaining list values into strings.
    """
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Each author entry is a reference ({"key": "/authors/..."}); resolve to names.
    # NOTE(review): `get_openlibrary_data` is the conventional name of the fetch
    # helper above, which appears under a mangled name in this file — verify.
    data['Authors'] = [
        get_openlibrary_data(author['key'])['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ', '.join(value)
    return data
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive loop: repeatedly prompt for an ISBN and print a book summary.
    while True:
        # NOTE(review): the input is bound to `snake_case_` but read back below as
        # `isbn` — the names look machine-mangled; confirm against the original.
        snake_case_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        # A valid ISBN is either 10 or 13 digits.
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            # NOTE(review): `summarize_book`/`get_openlibrary_data` are not defined
            # under these names in this file (the helpers above are mangled) — verify.
            snake_case_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
            print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 78 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["""ReformerTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["""ReformerTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ReformerAttention""",
        """ReformerForMaskedLM""",
        """ReformerForQuestionAnswering""",
        """ReformerForSequenceClassification""",
        """ReformerLayer""",
        """ReformerModel""",
        """ReformerModelWithLMHead""",
        """ReformerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports on access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 |
"""simple docstring"""
class Node:
    """A node of a doubly linked list: holds `data` plus previous/next links.

    Named `Node` to match its construction sites in the list class below.
    """

    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"""{self.data}"""

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    """Forward iterator over a chain of nodes, yielding each node's data.

    Named to match its use in the list class's `__iter__` below.
    """

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        # Yield this node's payload and advance to the next node.
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value
class A_:
    """A doubly linked list tracking both head and tail.

    Method names are restored to match the internal call sites
    (`set_head`, `insert_before_node`, `remove_node_pointers`, ...).
    """

    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        # Space-separated data values from head to tail.
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        # Insert `node` as the new head (also the tail when the list is empty).
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        # Insert `node` as the new tail (delegates to set_head when empty).
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        # Append a new node holding `value` at the end of the list.
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        # Splice `node_to_insert` immediately before `node`.
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        # Splice `node_to_insert` immediately after `node`.
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value) -> None:
        # 1-based insertion; appends at the end when position is past the tail.
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        # Return the first node holding `item`; raise if absent.
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found')

    def delete_value(self, value):
        # Remove the first node holding `value`, fixing head/tail if needed.
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        # Unlink `node` from its neighbours and clear its own links.
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def _lowerCAmelCase ( ):
    """Do nothing and return ``None`` (empty placeholder)."""
    return None
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
# Random browser User-Agent header sent with the scraping requests below.
snake_case_ = {"""UserAgent""": UserAgent().random}
def _lowerCAmelCase ( lowercase_ ):
    """Extract the user-profile dict embedded in an Instagram page <script> tag.

    Parameters
    ----------
    lowercase_ :
        A parsed ``<script>`` tag (e.g. a BeautifulSoup tag) whose first
        content chunk embeds the shared-data JSON: ``... = {"config": ...};``.

    Returns
    -------
    dict
        The ``user`` node of the first ``ProfilePage`` entry.

    Bug fix: the body previously read the undefined names ``script``/``data``/
    ``info`` while every value was assigned to the mangled name
    ``UpperCAmelCase``; the intended data flow is restored below.
    """
    data = lowercase_.contents[0]
    # Strip everything before the JSON object and the trailing ";".
    info = json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A_ :
    """Scraper for the public metadata of an Instagram profile.

    Fetches ``https://www.instagram.com/<username>/`` and reads the profile
    dict out of the page's <script> tags.

    NOTE(review): the constructor binds its results to the mangled name
    ``UpperCAmelCase`` instead of ``self.url``/``self.user_data`` (which the
    methods below read), so the class cannot work as written.
    """

    def __init__( self :int , lowercase_ :str ) -> Dict:
        # NOTE(review): ``username`` is undefined — the parameter is ``lowercase_``.
        UpperCAmelCase = f"""https://www.instagram.com/{username}/"""
        UpperCAmelCase = self.get_json()

    def UpperCAmelCase__ ( self :Tuple ) -> dict:
        # get_json: download the profile page and extract the user dict from one
        # of its <script> tags (the tag index differs between page layouts).
        # NOTE(review): ``extract_user_profile`` is not defined under that name
        # in this file (the helper above was renamed ``_lowerCAmelCase``).
        UpperCAmelCase = requests.get(self.url , headers=lowercase_ ).text
        UpperCAmelCase = BeautifulSoup(lowercase_ , 'html.parser' ).find_all('script' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )

    def __repr__( self :Dict ) -> str:
        return f"""{self.__class__.__name__}('{self.username}')"""

    def __str__( self :int ) -> str:
        return f"""{self.fullname} ({self.username}) is {self.biography}"""

    @property
    def UpperCAmelCase__ ( self :Optional[Any] ) -> str:
        # username
        return self.user_data["username"]

    @property
    def UpperCAmelCase__ ( self :str ) -> str:
        # full name
        return self.user_data["full_name"]

    @property
    def UpperCAmelCase__ ( self :Tuple ) -> str:
        # biography
        return self.user_data["biography"]

    @property
    def UpperCAmelCase__ ( self :Optional[Any] ) -> str:
        # business e-mail address
        return self.user_data["business_email"]

    @property
    def UpperCAmelCase__ ( self :Optional[int] ) -> str:
        # external website URL
        return self.user_data["external_url"]

    @property
    def UpperCAmelCase__ ( self :Optional[int] ) -> int:
        # follower count
        return self.user_data["edge_followed_by"]["count"]

    @property
    def UpperCAmelCase__ ( self :int ) -> int:
        # following count
        return self.user_data["edge_follow"]["count"]

    @property
    def UpperCAmelCase__ ( self :List[str] ) -> int:
        # number of posts
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def UpperCAmelCase__ ( self :Dict ) -> str:
        # HD profile-picture URL
        return self.user_data["profile_pic_url_hd"]

    @property
    def UpperCAmelCase__ ( self :Any ) -> bool:
        # verified-badge flag
        return self.user_data["is_verified"]

    @property
    def UpperCAmelCase__ ( self :Dict ) -> bool:
        # private-account flag
        return self.user_data["is_private"]
def _lowerCAmelCase ( lowercase_ = "github" ):
    """Live smoke test for ``InstagramUser`` against the given profile.

    Skipped on CI; the detailed assertions only run for the ``github`` profile.

    NOTE(review): the constructed user is bound to the mangled name
    ``UpperCAmelCase`` while every assertion reads ``instagram_user`` (and
    ``username``), so the test cannot run as written.
    """
    import os

    if os.environ.get('CI' ):
        return  # test failing on GitHub Actions
    UpperCAmelCase = InstagramUser(lowercase_ )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , lowercase_ )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('https://instagram.' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    # Run doctests, then demo-scrape the public "github" profile.
    import doctest

    doctest.testmod()
    # NOTE(review): the instance is bound to ``snake_case_`` while the prints
    # below read ``instagram_user`` — undefined as written.
    snake_case_ = InstagramUser("""github""")
    print(instagram_user)
    print(f'''{instagram_user.number_of_posts = }''')
    print(f'''{instagram_user.number_of_followers = }''')
    print(f'''{instagram_user.number_of_followings = }''')
    print(f'''{instagram_user.email = }''')
    print(f'''{instagram_user.website = }''')
    print(f'''{instagram_user.profile_picture_url = }''')
    print(f'''{instagram_user.is_verified = }''')
    print(f'''{instagram_user.is_private = }''')
| 78 |
"""simple docstring"""
class A_ :
    """Fenwick-tree-style structure supporting point updates and range
    ``max`` queries over a fixed-size array.

    NOTE(review): assignment targets are mangled to ``UpperCAmelCase`` here as
    well (``self.size``/``self.arr``/``self.tree`` and the loop locals), so
    the structure cannot work as written.
    """

    def __init__( self :List[Any] , lowercase_ :int ) -> None:
        UpperCAmelCase = size
        UpperCAmelCase = [0] * size  # raw values
        UpperCAmelCase = [0] * size  # cached interval maxima

    @staticmethod
    def UpperCAmelCase__ ( lowercase_ :int ) -> int:
        # get_next: next index whose tree interval covers this one.
        return index | (index + 1)

    @staticmethod
    def UpperCAmelCase__ ( lowercase_ :int ) -> int:
        # get_prev: one before the left border of the interval ending at index.
        return (index & (index + 1)) - 1

    def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None:
        # update: set arr[index] = value and refresh the affected maxima.
        UpperCAmelCase = value
        while index < self.size:
            UpperCAmelCase = self.get_prev(lowercase_ ) + 1
            if current_left_border == index:
                # interval is a single cell — store the value directly
                UpperCAmelCase = value
            else:
                UpperCAmelCase = max(lowercase_ , lowercase_ , lowercase_ )
            UpperCAmelCase = self.get_next(lowercase_ )

    def UpperCAmelCase__ ( self :List[str] , lowercase_ :int , lowercase_ :int ) -> int:
        # query: maximum over the half-open range [left, right).
        right -= 1  # Because of right is exclusive
        UpperCAmelCase = 0
        while left <= right:
            UpperCAmelCase = self.get_prev(lowercase_ )
            if left <= current_left:
                # whole cached interval fits inside the query range
                UpperCAmelCase = max(lowercase_ , self.tree[right] )
                UpperCAmelCase = current_left
            else:
                # fall back to the single raw cell
                UpperCAmelCase = max(lowercase_ , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Test helper that builds a tiny SqueezeBert config plus dummy inputs and
    checks the output shapes of every task head.

    NOTE(review): every ``UpperCAmelCase = ...`` statement in this class is a
    mangled assignment (the intended targets were ``self.<attr>`` or locals
    such as ``config``/``model``/``result``); the tester cannot run as
    written — restore the names against the upstream transformers test.
    """

    def __init__( self :Optional[Any] , lowercase_ :int , lowercase_ :Tuple=13 , lowercase_ :Any=7 , lowercase_ :List[str]=True , lowercase_ :Optional[int]=True , lowercase_ :str=False , lowercase_ :Optional[int]=True , lowercase_ :Union[str, Any]=99 , lowercase_ :Optional[int]=32 , lowercase_ :int=5 , lowercase_ :int=4 , lowercase_ :Optional[int]=64 , lowercase_ :Union[str, Any]="gelu" , lowercase_ :Union[str, Any]=0.1 , lowercase_ :List[Any]=0.1 , lowercase_ :Union[str, Any]=5_12 , lowercase_ :Tuple=16 , lowercase_ :Union[str, Any]=2 , lowercase_ :Optional[Any]=0.02 , lowercase_ :Union[str, Any]=3 , lowercase_ :int=4 , lowercase_ :Any=None , lowercase_ :Any=2 , lowercase_ :List[Any]=2 , lowercase_ :Optional[Any]=2 , lowercase_ :Any=2 , lowercase_ :int=4 , lowercase_ :Optional[int]=1 , ) -> int:
        # Store every hyper-parameter on the tester instance.
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = seq_length
        UpperCAmelCase = is_training
        UpperCAmelCase = use_input_mask
        UpperCAmelCase = use_token_type_ids
        UpperCAmelCase = use_labels
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = type_vocab_size
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        UpperCAmelCase = num_labels
        UpperCAmelCase = num_choices
        UpperCAmelCase = scope
        UpperCAmelCase = q_groups
        UpperCAmelCase = k_groups
        UpperCAmelCase = v_groups
        UpperCAmelCase = post_attention_groups
        UpperCAmelCase = intermediate_groups
        UpperCAmelCase = output_groups

    def UpperCAmelCase__ ( self :Optional[Any] ) -> Union[str, Any]:
        # prepare_config_and_inputs: random ids/mask/labels plus a tiny config.
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase = None
        if self.use_input_mask:
            UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        UpperCAmelCase = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
        # get_config: a deliberately tiny SqueezeBert configuration.
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )

    def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :List[str] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any , lowercase_ :Dict , lowercase_ :Tuple ) -> str:
        # Base model: check last_hidden_state shape.
        UpperCAmelCase = SqueezeBertModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        UpperCAmelCase = model(lowercase_ , lowercase_ )
        UpperCAmelCase = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] , lowercase_ :str , lowercase_ :Tuple , lowercase_ :List[Any] , lowercase_ :Dict , lowercase_ :Any ) -> int:
        # Masked-LM head: logits over the vocabulary per position.
        UpperCAmelCase = SqueezeBertForMaskedLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[Any] , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :Dict , lowercase_ :List[str] ) -> Dict:
        # QA head: per-position start/end logits.
        UpperCAmelCase = SqueezeBertForQuestionAnswering(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        UpperCAmelCase = model(
            lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :str , lowercase_ :List[str] , lowercase_ :int , lowercase_ :str , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] ) -> int:
        # Sequence-classification head: one logit vector per example.
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = SqueezeBertForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :Any , lowercase_ :Dict , lowercase_ :Optional[int] , lowercase_ :Tuple ) -> Any:
        # Token-classification head: one logit vector per token.
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = SqueezeBertForTokenClassification(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCAmelCase__ ( self :int , lowercase_ :Dict , lowercase_ :Any , lowercase_ :Tuple , lowercase_ :Optional[Any] , lowercase_ :int , lowercase_ :str ) -> int:
        # Multiple-choice head: inputs are tiled across the choice dimension.
        UpperCAmelCase = self.num_choices
        UpperCAmelCase = SqueezeBertForMultipleChoice(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCAmelCase = model(
            lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def UpperCAmelCase__ ( self :Optional[Any] ) -> Tuple:
        # prepare_config_and_inputs_for_common: repackage as (config, inputs_dict).
        UpperCAmelCase = self.prepare_config_and_inputs()
        ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
        UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Common model/pipeline test suite for SqueezeBert (shape checks with
    tiny random weights).

    NOTE(review): the mangled ``UpperCAmelCase = ...`` assignments mean the
    prepared inputs are discarded before use; restore the intended local
    names before relying on this suite.
    """

    # All SqueezeBert architectures exercised by the common model tests.
    __UpperCamelCase = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # Pipeline-tag -> model-class mapping consumed by the pipeline mixin.
    __UpperCamelCase = (
        {
            """feature-extraction""": SqueezeBertModel,
            """fill-mask""": SqueezeBertForMaskedLM,
            """question-answering""": SqueezeBertForQuestionAnswering,
            """text-classification""": SqueezeBertForSequenceClassification,
            """token-classification""": SqueezeBertForTokenClassification,
            """zero-shot""": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __UpperCamelCase = False
    __UpperCamelCase = True
    __UpperCamelCase = False

    def UpperCAmelCase__ ( self :str ) -> Dict:
        # setUp: build the model tester and a config tester.
        UpperCAmelCase = SqueezeBertModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=lowercase_ , dim=37 )

    def UpperCAmelCase__ ( self :int ) -> Dict:
        # Shared config sanity checks.
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ ( self :List[str] ) -> Tuple:
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*lowercase_ )

    def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase_ )

    def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase_ )

    def UpperCAmelCase__ ( self :str ) -> Union[str, Any]:
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase_ )

    def UpperCAmelCase__ ( self :List[Any] ) -> Optional[Any]:
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase_ )

    def UpperCAmelCase__ ( self :List[str] ) -> Any:
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase_ )

    @slow
    def UpperCAmelCase__ ( self :Dict ) -> List[str]:
        # Ensure the first published checkpoint loads from the hub.
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = SqueezeBertModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test pinning the squeezebert-mnli classification logits.

    NOTE(review): the model/inputs are bound to the mangled name
    ``UpperCAmelCase`` while the assertions read ``output``; cannot run as
    written.
    """

    @slow
    def UpperCAmelCase__ ( self :Tuple ) -> int:
        UpperCAmelCase = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
        UpperCAmelCase = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
        UpperCAmelCase = model(lowercase_ )[0]
        UpperCAmelCase = torch.Size((1, 3) )
        self.assertEqual(output.shape , lowercase_ )
        UpperCAmelCase = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        # Tolerate small numerical drift between environments.
        self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-4 ) )
| 78 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """SentencePiece-style Unigram tokenizer assembled from ``tokenizers``
    building blocks: NMT+NFKC+lowercase normalization, Metaspace/digit/
    punctuation pre-tokenization, and an EOS-appending post-processor.

    NOTE(review): local assignments are mangled to ``UpperCAmelCase`` here as
    well (e.g. the ``Tokenizer`` instance and the trainers), so construction,
    training and serialization cannot work as written.
    """

    def __init__( self :Dict , lowercase_ :str = "▁" , lowercase_ :bool = True , lowercase_ :Union[str, AddedToken] = "<unk>" , lowercase_ :Union[str, AddedToken] = "</s>" , lowercase_ :Union[str, AddedToken] = "<pad>" , ) -> str:
        # Fixed ids for the special tokens used by the template processor.
        UpperCAmelCase = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        UpperCAmelCase = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            UpperCAmelCase = token_dict['token']
        UpperCAmelCase = Tokenizer(Unigram() )
        # Normalize: NMT + NFKC, collapse runs of spaces, lowercase.
        UpperCAmelCase = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}' ) , ' ' ),
                normalizers.Lowercase(),
            ] )
        # Pre-tokenize: Metaspace replacement, split digits and punctuation.
        UpperCAmelCase = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ),
                pre_tokenizers.Digits(individual_digits=lowercase_ ),
                pre_tokenizers.Punctuation(),
            ] )
        UpperCAmelCase = decoders.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ )
        # Append the EOS token to every encoded sequence.
        UpperCAmelCase = TemplateProcessing(
            single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
        UpperCAmelCase = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(lowercase_ , lowercase_ )

    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Union[str, List[str]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Union[str, Any]:
        # train: fit the Unigram model on one or more text files.
        UpperCAmelCase = trainers.UnigramTrainer(
            vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , )
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = [files]
        self._tokenizer.train(lowercase_ , trainer=lowercase_ )
        self.add_unk_id()

    def UpperCAmelCase__ ( self :str , lowercase_ :Union[Iterator[str], Iterator[Iterator[str]]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Tuple:
        # train_from_iterator: same as ``train`` but from an in-memory iterator.
        UpperCAmelCase = trainers.UnigramTrainer(
            vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , )
        self._tokenizer.train_from_iterator(lowercase_ , trainer=lowercase_ )
        self.add_unk_id()

    def UpperCAmelCase__ ( self :Union[str, Any] ) -> int:
        # add_unk_id: round-trip the serialized model to set the unk token id.
        UpperCAmelCase = json.loads(self._tokenizer.to_str() )
        UpperCAmelCase = self.special_tokens['unk']['id']
        UpperCAmelCase = Tokenizer.from_str(json.dumps(lowercase_ ) )
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class A_ :
    """Polynomial with real coefficients stored lowest degree first
    (``coefficients[i]`` multiplies ``x**i``).

    NOTE(review): many assignments are mangled to ``UpperCAmelCase`` (e.g.
    ``self.coefficients`` in ``__init__`` and the local ``coefficients``
    buffers), so several methods cannot run as written.
    """

    def __init__( self :List[str] , lowercase_ :int , lowercase_ :MutableSequence[float] ) -> None:
        # Require exactly degree + 1 coefficients.
        if len(lowercase_ ) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )
        UpperCAmelCase = list(lowercase_ )
        UpperCAmelCase = degree

    def __add__( self :List[str] , lowercase_ :Polynomial ) -> Polynomial:
        # Coefficient-wise addition, padding with the longer polynomial.
        if self.degree > polynomial_a.degree:
            UpperCAmelCase = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , lowercase_ )
        else:
            UpperCAmelCase = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , lowercase_ )

    def __sub__( self :str , lowercase_ :Polynomial ) -> Polynomial:
        # a - b == a + (-1) * b
        return self + polynomial_a * Polynomial(0 , [-1] )

    def __neg__( self :Optional[int] ) -> Polynomial:
        return Polynomial(self.degree , [-c for c in self.coefficients] )

    def __mul__( self :int , lowercase_ :Polynomial ) -> Polynomial:
        # Convolution of the two coefficient sequences.
        UpperCAmelCase = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , lowercase_ )

    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :int | float ) -> int | float:
        # evaluate: direct power-sum evaluation at ``substitution``.
        UpperCAmelCase = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__( self :List[Any] ) -> str:
        # Human-readable form, highest degree first; zero terms skipped.
        UpperCAmelCase = ''
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                # NOTE(review): ``lowercase_`` is undefined here — the exponent
                # presumably should be ``i``.
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowercase_ )
        return polynomial

    def __repr__( self :int ) -> str:
        return self.__str__()

    def UpperCAmelCase__ ( self :int ) -> Polynomial:
        # derivative: d/dx.
        UpperCAmelCase = [0] * self.degree
        for i in range(self.degree ):
            UpperCAmelCase = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , lowercase_ )

    def UpperCAmelCase__ ( self :str , lowercase_ :int | float = 0 ) -> Polynomial:
        # integral: antiderivative with integration constant ``constant``.
        UpperCAmelCase = [0] * (self.degree + 2)
        UpperCAmelCase = constant
        for i in range(self.degree + 1 ):
            UpperCAmelCase = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , lowercase_ )

    def __eq__( self :Dict , lowercase_ :object ) -> bool:
        # Structural equality: same degree and identical coefficients.
        if not isinstance(lowercase_ , lowercase_ ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__( self :List[Any] , lowercase_ :object ) -> bool:
        return not self.__eq__(lowercase_ )
| 78 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _lowerCAmelCase ( lowercase_ = 8 ):
    """Return a cryptographically random password of ``lowercase_`` characters
    drawn from ASCII letters, digits and punctuation.

    Bug fix: the candidate alphabet was bound to the mangled name
    ``UpperCAmelCase`` and ``secrets.choice`` was then called on the *length*
    argument, raising a TypeError on every call.
    """
    alphabet = ascii_letters + digits + punctuation
    return "".join(secrets.choice(alphabet ) for _ in range(lowercase_ ) )
def _lowerCAmelCase ( chars_incl , i ):
    """Generate a password of length ``i`` guaranteed to contain every
    character of ``chars_incl``.

    The remaining ``i - len(chars_incl)`` characters are split roughly evenly
    between random ASCII letters, digits and punctuation, and the combined
    sequence is shuffled so the required characters do not cluster at the
    front.

    Bug fix: the original declared both parameters as ``lowercase_`` (a
    duplicate argument name is a SyntaxError) and bound every intermediate
    value to the mangled name ``UpperCAmelCase`` while reading
    ``i``/``chars_incl``/``quotient``/``remainder``; the intended algorithm
    is restored here.
    """
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random letters + random digits + random punctuation
    chars = (
        chars_incl
        + "".join(secrets.choice(ascii_letters ) for _ in range(quotient + remainder ) )
        + "".join(secrets.choice(digits ) for _ in range(quotient ) )
        + "".join(secrets.choice(punctuation ) for _ in range(quotient ) )
    )
    password_chars = list(chars )
    shuffle(password_chars )
    return "".join(password_chars )
# random is a generalised function for letters, characters and numbers
def _lowerCAmelCase ( ctbi , i ):
    """Return a string of ``i`` characters chosen uniformly at random (via
    ``secrets``) from the sequence ``ctbi``.

    Bug fix: both parameters were declared as ``lowercase_`` — a duplicate
    argument name is a SyntaxError — so they are renamed to match the
    positional order the commented-out callers use (character set first,
    count second).
    """
    return "".join(secrets.choice(ctbi ) for _ in range(i ) )
def _lowerCAmelCase ( ctbi , i ):
    """Unimplemented placeholder helper (returns ``None``).

    Bug fix: both parameters were declared as ``lowercase_`` — a duplicate
    argument name is a SyntaxError — so they are renamed; behavior (a no-op)
    is unchanged.
    """
    pass  # Put your code here...


def _lowerCAmelCase ( ctbi , i ):
    """Unimplemented placeholder helper (returns ``None``).

    Bug fix: duplicate ``lowercase_`` parameter names (SyntaxError) renamed.
    """
    pass  # Put your code here...


def _lowerCAmelCase ( ctbi , i ):
    """Unimplemented placeholder helper (returns ``None``).

    Bug fix: duplicate ``lowercase_`` parameter names (SyntaxError) renamed.
    """
    pass  # Put your code here...
def _lowerCAmelCase ( password , min_length = 8 ):
    """Return True if ``password`` is at least ``min_length`` characters long
    and contains an uppercase letter, a lowercase letter, a digit and a
    punctuation character.

    Bug fix: the original declared both parameters as ``lowercase_`` (a
    duplicate argument name is a SyntaxError) while the body read
    ``password``/``min_length`` and bound its flags to the mangled name
    ``UpperCAmelCase``; the names the body expects are restored.
    """
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _lowerCAmelCase ( ):
    """Interactive driver: prompt for a length and a set of required
    characters, then print generated passwords.

    NOTE(review): the prompted values are bound to the mangled name
    ``UpperCAmelCase`` while the calls below pass ``lowercase_``, which is
    undefined at this scope — the driver cannot run as written.
    """
    UpperCAmelCase = int(input('Please indicate the max length of your password: ' ).strip() )
    UpperCAmelCase = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' , password_generator(lowercase_ ) )
    print(
        'Alternative Password generated:' , alternative_password_generator(lowercase_ , lowercase_ ) , )
    print('[If you are thinking of using this passsword, You better save it.]' )
if __name__ == "__main__":
    # Launch the interactive password generator.
    main()
| 78 | 1 |
"""simple docstring"""
from math import ceil
def _lowerCAmelCase ( lowercase_ = 1001 ):
    """Return the sum of the numbers on both diagonals of a ``lowercase_`` x
    ``lowercase_`` number spiral (Project Euler problem 28).

    Bug fix: the loop bound read ``ceil(n / 2.0)`` with ``n`` undefined (the
    parameter is ``lowercase_``) and the running sum was bound to the mangled
    name ``UpperCAmelCase``; both are repaired below.

    >>> _lowerCAmelCase(5)
    101
    """
    total = 1  # the centre cell
    for i in range(1 , int(ceil(lowercase_ / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        # The four corners of ring ``i`` sum to 4*odd**2 - 6*even.
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys

    # No CLI argument: solve for the default spiral size; otherwise parse the
    # first argument as the size.
    if len(sys.argv) == 1:
        # NOTE(review): no function named ``solution`` is defined in this file
        # (the solver above is named ``_lowerCAmelCase``).
        print(solution())
    else:
        try:
            # NOTE(review): the parsed argument is bound to ``snake_case_``
            # while ``solution(n)`` reads the undefined name ``n``.
            snake_case_ = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("""Invalid entry - please enter a number""")
| 78 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    """Shared checks for the DeepFloyd IF pipeline test suites: builders for
    tiny dummy components plus save/load round-trip assertions.

    NOTE(review): local assignments are mangled to the single name
    ``UpperCAmelCase`` throughout (``pipe``, ``inputs``, the component
    instances, ...), so these helpers cannot run as written; the comments
    below describe the evident intent.
    """

    def UpperCAmelCase__ ( self :Any ) -> List[str]:
        # Dummy components for the base IF pipeline (3-channel input unet).
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase__ ( self :List[Any] ) -> Any:
        # Dummy components for the super-resolution IF pipeline (6-channel
        # input unet plus an image-noising scheduler).
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase__ ( self :List[str] ) -> str:
        # Round-trip save/load with the prompt pre-encoded and every optional
        # component set to None; outputs must match to 1e-4.
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['prompt']
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        if "image" in inputs:
            UpperCAmelCase = inputs['image']
        else:
            UpperCAmelCase = None
        if "mask_image" in inputs:
            UpperCAmelCase = inputs['mask_image']
        else:
            UpperCAmelCase = None
        if "original_image" in inputs:
            UpperCAmelCase = inputs['original_image']
        else:
            UpperCAmelCase = None
        UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(lowercase_ )
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )

    def UpperCAmelCase__ ( self :List[Any] ) -> str:
        # Plain save/load round trip; outputs must match to 1e-4.
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )
| 78 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Dummy placeholder classes that raise a helpful ImportError (via
# `requires_backends`) whenever instantiated or used without `torch` installed.
# Fix: every method previously declared `*lowercase_ , **lowercase_`, which is a
# duplicate-argument SyntaxError in Python; renamed to the conventional
# `*args, **kwargs`. NOTE(review): the two classmethods share one obfuscated
# name (upstream these were likely `from_config` / `from_pretrained`); names
# are kept unchanged to preserve the current interface.
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
# Dummy stand-ins for torch-backed module-level functions: each raises a
# helpful ImportError via `requires_backends` when called without `torch`.
# Fix: `*lowercase_ , **lowercase_` is a duplicate-argument SyntaxError; the
# keyword catch-all is renamed to `kwargs` while `*lowercase_` is kept because
# the body passes it to `requires_backends` unchanged.
# NOTE(review): all seven definitions share one obfuscated name, so only the
# last binding survives at import time — upstream each had a distinct name.
def _lowerCAmelCase ( *lowercase_ , **kwargs ):
    requires_backends(lowercase_ , ['torch'] )
def _lowerCAmelCase ( *lowercase_ , **kwargs ):
    requires_backends(lowercase_ , ['torch'] )
def _lowerCAmelCase ( *lowercase_ , **kwargs ):
    requires_backends(lowercase_ , ['torch'] )
def _lowerCAmelCase ( *lowercase_ , **kwargs ):
    requires_backends(lowercase_ , ['torch'] )
def _lowerCAmelCase ( *lowercase_ , **kwargs ):
    requires_backends(lowercase_ , ['torch'] )
def _lowerCAmelCase ( *lowercase_ , **kwargs ):
    requires_backends(lowercase_ , ['torch'] )
def _lowerCAmelCase ( *lowercase_ , **kwargs ):
    requires_backends(lowercase_ , ['torch'] )
# Dummy placeholder classes guarding the `torch` backend (see note above the
# first group). Fix: duplicate `*lowercase_ , **lowercase_` argument names
# (SyntaxError) renamed to `*args, **kwargs`; behavior is unchanged — every
# entry point simply calls `requires_backends(... , ['torch'])`.
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
# Dummy placeholder classes guarding the `torch` backend (see note above the
# first group). Fix: duplicate `*lowercase_ , **lowercase_` argument names
# (SyntaxError) renamed to `*args, **kwargs`; behavior is unchanged.
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
# Dummy placeholder classes guarding the `torch` backend (see note above the
# first group). Fix: duplicate `*lowercase_ , **lowercase_` argument names
# (SyntaxError) renamed to `*args, **kwargs`; behavior is unchanged.
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
class A_ ( metaclass=SCREAMING_SNAKE_CASE_ ):
    """Placeholder for a torch-backed class; raises on any use."""
    __UpperCamelCase = ["""torch"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
    @classmethod
    def UpperCAmelCase__ ( cls , *args , **kwargs ):
        requires_backends(cls , ['torch'] )
| 78 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=8 ):
UpperCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _lowerCAmelCase ( pil_image , w=512 , h=512 ):
    """Resize a PIL image and convert it to a (1, 3, h, w) float tensor in [-1, 1].

    Fixes: the original signature declared three parameters all named
    ``lowercase_`` (a duplicate-argument SyntaxError) while the body read the
    unbound names ``pil_image``/``w``/``h``; it also referenced the nonexistent
    dtype ``np.floataa`` (restored to ``np.float32``).

    Args:
        pil_image (PIL.Image.Image): input image.
        w (int): target width in pixels (default 512).
        h (int): target height in pixels (default 512).

    Returns:
        torch.Tensor: batched CHW tensor scaled to [-1, 1].
    """
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('RGB' ) )
    # Map uint8 [0, 255] to float [-1, 1].
    arr = arr.astype(np.float32 ) / 1_2_7.5 - 1
    # HWC -> CHW for torch convention.
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , unet :UNetaDConditionModel , scheduler :DDPMScheduler , movq :VQModel , ) -> None:
    """Register the pipeline's components and precompute the MoVQ scale factor.

    Fix: the original declared all three parameters as the duplicate name
    ``lowercase_`` (SyntaxError); names are restored to match the keywords
    passed to ``register_modules`` below.
    """
    super().__init__()
    self.register_modules(
        unet=unet , scheduler=scheduler , movq=movq , )
    # Spatial downscale factor of the MoVQ autoencoder (2 per down block).
    # NOTE(review): the obfuscated source dropped the attribute name; upstream
    # this is `movq_scale_factor` — confirm against callers.
    self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase__ ( self , num_inference_steps , strength , device ):
    """Trim the scheduler's timestep list for img2img at the given strength.

    Fix: duplicate ``lowercase_`` parameters (SyntaxError) and unbound body
    names restored. ``device`` is accepted for interface compatibility but is
    not used here.

    Returns:
        tuple: (remaining timesteps, effective number of inference steps).
    """
    # get the original timestep using init_timestep
    init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
    t_start = max(num_inference_steps - init_timestep , 0 )
    timesteps = self.scheduler.timesteps[t_start:]
    return timesteps, num_inference_steps - t_start
def UpperCAmelCase__ ( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
    """Encode the input image(s) into noisy latents for img2img sampling.

    Fix: the original declared every parameter as the duplicate name
    ``lowercase_`` (SyntaxError) while the body read ``image``, ``batch_size``,
    ``generator`` etc.; parameter and local names are restored.

    Args:
        image: tensor image batch, or already-encoded latents (channel dim 4).
        timestep: scheduler timestep(s) used to noise the initial latents.
        batch_size / num_images_per_prompt: effective batch = product of both.
        dtype / device: target dtype and device for the latents.
        generator: optional torch.Generator (or per-sample list) for sampling.

    Returns:
        Noised initial latents.
    """
    if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
        raise ValueError(
            f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
    image = image.to(device=device , dtype=dtype )
    batch_size = batch_size * num_images_per_prompt
    if image.shape[1] == 4:
        # Input is already in latent space — use it directly.
        init_latents = image
    else:
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        elif isinstance(generator , list ):
            # One generator per sample: encode each image slice separately.
            init_latents = [
                self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
            ]
            init_latents = torch.cat(init_latents , dim=0 )
        else:
            init_latents = self.movq.encode(image ).latent_dist.sample(generator )
        init_latents = self.movq.config.scaling_factor * init_latents
    init_latents = torch.cat([init_latents] , dim=0 )
    shape = init_latents.shape
    noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
    # get latents
    init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
    latents = init_latents
    return latents
def UpperCAmelCase__ ( self , gpu_id :int=0 ) -> None:
    """Offload all models to CPU via accelerate, moving them to the GPU only
    during their forward pass (sequential CPU offload).

    Fix: the body read the unbound name ``gpu_id`` (the parameter was the
    obfuscated ``lowercase_``) and called ``cpu_offload`` with the wrong
    arguments; both are restored.
    """
    if is_accelerate_available():
        from accelerate import cpu_offload
    else:
        raise ImportError('Please install accelerate via `pip install accelerate`' )
    device = torch.device(f"""cuda:{gpu_id}""" )
    models = [
        self.unet,
        self.movq,
    ]
    for cpu_offloaded_model in models:
        if cpu_offloaded_model is not None:
            cpu_offload(cpu_offloaded_model , device )
def UpperCAmelCase__(self, gpu_id=0):
    """Offload models to CPU with hooks (`accelerate >= 0.17.0`).

    Compared with sequential offload, whole models (not submodules) are moved
    to ``cuda:{gpu_id}`` on demand, which is faster at a small memory cost.

    Raises:
        ImportError: when a compatible `accelerate` version is unavailable.
    """
    if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
        from accelerate import cpu_offload_with_hook
    else:
        raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
    device = torch.device(f"""cuda:{gpu_id}""")
    if self.device.type != "cpu":
        self.to('cpu', silence_dtype_warnings=True)
        torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
    hook = None
    # Chain hooks so each model offloads when the next one is needed.
    for cpu_offloaded_model in [self.unet, self.movq]:
        _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
    # We'll offload the last model manually.
    self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__(self):
    """Device on which the pipeline's models actually execute.

    With accelerate hooks installed, the UNet's modules carry an
    `_hf_hook.execution_device`; fall back to ``self.device`` otherwise.
    """
    if not hasattr(self.unet, '_hf_hook'):
        return self.device
    for module in self.unet.modules():
        if (
            hasattr(module, '_hf_hook')
            and hasattr(module._hf_hook, 'execution_device')
            and module._hf_hook.execution_device is not None
        ):
            return torch.device(module._hf_hook.execution_device)
    return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]:
    """Image-to-image denoising loop (Kandinsky-style MoVQ pipeline).

    NOTE(review): this block is machine-mangled — the signature repeats the
    parameter name `lowercase_` (a SyntaxError) and every local assignment
    targets `UpperCAmelCase`, while later reads use names (`image_embeds`,
    `latents`, `timesteps`, ...) that are never bound.  The comments below
    describe the apparent intent only; the code cannot run as written.
    """
    UpperCAmelCase = self._execution_device
    # classifier-free guidance is active when the guidance scale exceeds 1
    UpperCAmelCase = guidance_scale > 1.0
    # collapse list-of-tensor conditioning embeddings into single batches
    if isinstance(lowercase_ , lowercase_ ):
        UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
    UpperCAmelCase = image_embeds.shape[0]
    if isinstance(lowercase_ , lowercase_ ):
        UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
    if do_classifier_free_guidance:
        # duplicate embeddings per requested image and stack [uncond, cond]
        UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 )
        UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
        UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
    # normalise `image` to a list and validate element types
    if not isinstance(lowercase_ , lowercase_ ):
        UpperCAmelCase = [image]
    if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
        raise ValueError(
            f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
    UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
    UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ )
    # encode the input image to MoVQ latents, one copy per output image
    UpperCAmelCase = self.movq.encode(lowercase_ )['latents']
    UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 )
    self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
    # truncate the schedule according to `strength` (img2img behaviour)
    UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
    UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
    UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
    UpperCAmelCase = self.prepare_latents(
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
    for i, t in enumerate(self.progress_bar(lowercase_ ) ):
        # expand the latents if we are doing classifier free guidance
        UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
        UpperCAmelCase = {'image_embeds': image_embeds}
        UpperCAmelCase = self.unet(
            sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
        if do_classifier_free_guidance:
            # split predicted noise / variance, then recombine with guidance
            UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
            UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
            UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
            UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
        # schedulers without learned variance only take the noise half
        if not (
            hasattr(self.scheduler.config , 'variance_type' )
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
        # compute the previous noisy sample x_t -> x_t-1
        UpperCAmelCase = self.scheduler.step(
            lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
    # post-processing
    UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
    if output_type in ["np", "pil"]:
        # map from [-1, 1] to [0, 1] and move channels last for numpy output
        UpperCAmelCase = image * 0.5 + 0.5
        UpperCAmelCase = image.clamp(0 , 1 )
        UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    if output_type == "pil":
        UpperCAmelCase = self.numpy_to_pil(lowercase_ )
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=lowercase_ )
| 78 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ ):
if number > 0:
raise ValueError('input must be a negative integer' )
UpperCAmelCase = len(bin(lowercase_ )[3:] )
UpperCAmelCase = bin(abs(lowercase_ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase = (
(
'1'
+ '0' * (binary_number_length - len(lowercase_ ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = x
UpperCAmelCase = y
for step in range(lowercase_ ): # noqa: B007
UpperCAmelCase = a * a - b * b + x
UpperCAmelCase = 2 * a * b + y
UpperCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) )
def _lowerCAmelCase(image_width=800, image_height=600, figure_center_x=-0.6, figure_center_y=0, figure_width=3.2, max_step=50, use_distance_color_coding=True):
    """Render the Mandelbrot set into a PIL RGB image.

    Args:
        image_width / image_height: output size in pixels.
        figure_center_x / figure_center_y: centre of the viewed region.
        figure_width: width of the viewed region in the complex plane.
        max_step: iteration budget per pixel.
        use_distance_color_coding: colour by escape distance when True,
            otherwise black-and-white.

    Returns:
        A `PIL.Image.Image`.

    NOTE(review): the colouring helpers (`get_distance`,
    `get_color_coded_rgb`, `get_black_and_white_rgb`) are the mangled
    `_lowerCAmelCase` defs above — the call names are broken in this file.
    """
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # Height follows from the width so pixels stay square (loop-invariant).
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
snake_case_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 78 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case_ = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ):
if attention_mask is None:
UpperCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class A_ :
"""simple docstring"""
def __init__( self :Optional[int] , lowercase_ :Optional[int] , lowercase_ :List[str]=13 , lowercase_ :Optional[int]=7 , lowercase_ :Dict=True , lowercase_ :Union[str, Any]=False , lowercase_ :Optional[int]=99 , lowercase_ :Dict=16 , lowercase_ :List[Any]=2 , lowercase_ :str=4 , lowercase_ :Any=4 , lowercase_ :str="gelu" , lowercase_ :Optional[int]=0.1 , lowercase_ :int=0.1 , lowercase_ :Tuple=32 , lowercase_ :Tuple=2 , lowercase_ :List[str]=1 , lowercase_ :Dict=0 , lowercase_ :Optional[int]=0.02 , ) -> List[str]:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = initializer_range
def UpperCAmelCase__ ( self :int ) -> Dict:
UpperCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase = shift_tokens_right(lowercase_ , 1 , 2 )
UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase_ , )
UpperCAmelCase = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, inputs_dict
def UpperCAmelCase__ ( self :str ) -> Any:
UpperCAmelCase , UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase__ ( self :int , lowercase_ :int , lowercase_ :str , lowercase_ :int ) -> Any:
UpperCAmelCase = 20
UpperCAmelCase = model_class_name(lowercase_ )
UpperCAmelCase = model.encode(inputs_dict['input_ids'] )
UpperCAmelCase , UpperCAmelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase_ , )
UpperCAmelCase = model.decode(lowercase_ , lowercase_ )
UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def UpperCAmelCase__ ( self :int , lowercase_ :Any , lowercase_ :List[str] , lowercase_ :Any ) -> Dict:
UpperCAmelCase = 20
UpperCAmelCase = model_class_name(lowercase_ )
UpperCAmelCase = model.encode(inputs_dict['input_ids'] )
UpperCAmelCase , UpperCAmelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase_ , decoder_position_ids=lowercase_ , )
UpperCAmelCase = model.decode(lowercase_ , lowercase_ , decoder_attention_mask=lowercase_ )
UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class A_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = 99
def UpperCAmelCase__ ( self :List[Any] ) -> Any:
UpperCAmelCase = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase = input_ids.shape[0]
UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Any:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_config_and_data()
UpperCAmelCase = FlaxBlenderbotForConditionalGeneration(lowercase_ )
UpperCAmelCase = lm_model(input_ids=lowercase_ )
UpperCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , lowercase_ )
def UpperCAmelCase__ ( self :Tuple ) -> Optional[int]:
UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase = FlaxBlenderbotForConditionalGeneration(lowercase_ )
UpperCAmelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
UpperCAmelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase = lm_model(input_ids=lowercase_ , decoder_input_ids=lowercase_ )
UpperCAmelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , lowercase_ )
def UpperCAmelCase__ ( self :str ) -> List[Any]:
UpperCAmelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
UpperCAmelCase = shift_tokens_right(lowercase_ , 1 , 2 )
UpperCAmelCase = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum()
UpperCAmelCase = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowercase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
UpperCAmelCase = FlaxBlenderbotModelTester(self )
def UpperCAmelCase__ ( self :Dict ) -> Dict:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Tuple ) -> Dict:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowercase_ , lowercase_ )
UpperCAmelCase = model_class(lowercase_ )
@jax.jit
def encode_jitted(lowercase_ :Dict , lowercase_ :Any=None , **lowercase_ :Optional[int] ):
return model.encode(input_ids=lowercase_ , attention_mask=lowercase_ )
with self.subTest('JIT Enabled' ):
UpperCAmelCase = encode_jitted(**lowercase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase = encode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase__ ( self :str ) -> Tuple:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
UpperCAmelCase = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase_ :str , lowercase_ :str , lowercase_ :Union[str, Any] ):
return model.decode(
decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , )
with self.subTest('JIT Enabled' ):
UpperCAmelCase = decode_jitted(**lowercase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase = decode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase = model(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[Any]:
UpperCAmelCase = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
UpperCAmelCase = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
UpperCAmelCase = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=lowercase_ )
UpperCAmelCase = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
UpperCAmelCase = ['Sam']
UpperCAmelCase = tokenizer(lowercase_ , return_tensors='jax' )
UpperCAmelCase = model.generate(**lowercase_ , **lowercase_ )
UpperCAmelCase = 'Sam is a great name. It means "sun" in Gaelic.'
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , **lowercase_ )
assert generated_txt[0].strip() == tgt_text
| 78 |
"""simple docstring"""
import requests
# Module constants (both were mangled to `snake_case_`, shadowing each other;
# the functions below read APPID / URL_BASE).
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def _lowerCAmelCase(q: str = "Chicago", appid: str = APPID) -> dict:
    """Fetch the current weather for location `q` from OpenWeatherMap.

    Returns the decoded JSON response as a dict.  Uses an explicit params
    dict rather than the fragile ``locals()`` trick.
    """
    return requests.get(URL_BASE + 'weather', params={"q": q, "appid": appid}).json()
def _lowerCAmelCase(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Fetch the weather forecast for location `q` from OpenWeatherMap.

    Returns the decoded JSON response as a dict.
    """
    return requests.get(URL_BASE + 'forecast', params={"q": q, "appid": appid}).json()
def _lowerCAmelCase(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Fetch the OpenWeatherMap one-call data for coordinates (lat, lon).

    Returns the decoded JSON response as a dict.
    """
    return requests.get(URL_BASE + 'onecall', params={"lat": lat, "lon": lon, "appid": appid}).json()
if __name__ == "__main__":
from pprint import pprint
while True:
snake_case_ = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 78 | 1 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b
def _lowerCAmelCase ( lowercase_ ):
return (gray > 127) & (gray <= 255)
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = np.zeros_like(lowercase_ )
UpperCAmelCase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
UpperCAmelCase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
UpperCAmelCase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
UpperCAmelCase = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
snake_case_ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
snake_case_ = np.array(Image.open(lena_path))
# kernel to be applied
snake_case_ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
snake_case_ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
snake_case_ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 78 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Processor coupling a LayoutLMv2 image processor with a LayoutXLM
    tokenizer into one callable that yields model-ready encodings.

    NOTE(review): this class is machine-mangled — the three class attributes
    below all bind the same name `__UpperCamelCase` (only the last survives)
    and method locals are assigned to `UpperCAmelCase` while later reads use
    names that are never bound.  Comments describe apparent intent only.
    """
    __UpperCamelCase = ["""image_processor""", """tokenizer"""]
    __UpperCamelCase = """LayoutLMv2ImageProcessor"""
    __UpperCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__( self :Any , lowercase_ :int=None , lowercase_ :Union[str, Any]=None , **lowercase_ :Optional[Any] ) -> Dict:
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`; both must end up non-None.
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , lowercase_ , )
            UpperCAmelCase = kwargs.pop('feature_extractor' )
        UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(lowercase_ , lowercase_ )
    def __call__( self :str , lowercase_ :Optional[int] , lowercase_ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ :Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ :Optional[Union[List[int], List[List[int]]]] = None , lowercase_ :bool = True , lowercase_ :Union[bool, str, PaddingStrategy] = False , lowercase_ :Union[bool, str, TruncationStrategy] = None , lowercase_ :Optional[int] = None , lowercase_ :int = 0 , lowercase_ :Optional[int] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = True , lowercase_ :Optional[Union[str, TensorType]] = None , **lowercase_ :Any , ) -> BatchEncoding:
        """Run the image processor (optionally its OCR), then the tokenizer,
        and return a `BatchEncoding` carrying pixel values under `image`."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
        # first, apply the image processor
        UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(lowercase_ , lowercase_ ):
                UpperCAmelCase = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            UpperCAmelCase = features['words']
        UpperCAmelCase = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # add pixel values
        UpperCAmelCase = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            # duplicate images so each overflowed encoding keeps its source image
            UpperCAmelCase = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] )
        UpperCAmelCase = images
        return encoded_inputs
    def UpperCAmelCase__ ( self :Dict , lowercase_ :List[Any] , lowercase_ :Any ) -> Optional[Any]:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        UpperCAmelCase = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(lowercase_ ) != len(lowercase_ ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f""" {len(lowercase_ )} and {len(lowercase_ )}""" )
        return images_with_overflow
    def UpperCAmelCase__ ( self :Any , *lowercase_ :int , **lowercase_ :Tuple ) -> Tuple:
        # Delegate batch decoding straight to the tokenizer.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
    def UpperCAmelCase__ ( self :Any , *lowercase_ :List[Any] , **lowercase_ :Optional[int] ) -> Optional[Any]:
        # Delegate single-sequence decoding straight to the tokenizer.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )
    @property
    def UpperCAmelCase__ ( self :int ) -> Optional[int]:
        # Names of the inputs the downstream model expects.
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def UpperCAmelCase__ ( self :int ) -> Dict:
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
        return self.image_processor_class
    @property
    def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]:
        # Deprecated alias for `image_processor`.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
        return self.image_processor
| 78 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = [0 for i in range(r + 1 )]
# nc0 = 1
UpperCAmelCase = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
UpperCAmelCase = min(lowercase_ , lowercase_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 78 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class A_:
    """Weighted directed graph stored as an adjacency list.

    ``self.graph`` maps each vertex to a list of ``[weight, neighbor]``
    out-edges. Restored from mangled source in which every method shared one
    placeholder name (so only the last definition survived) and every
    assignment target had been replaced, leaving all locals unbound.
    """

    def __init__(self):
        # vertex -> list of [weight, neighbor] out-edges
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the directed edge u -> v with weight w (duplicates ignored)."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the target vertex exists even if it has no out-edges
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every vertex of the graph."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge u -> v if present."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from s (default: first vertex).

        Returns the visit order; stops early (including d) once d is reached.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random size if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from s (default: first vertex); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing into u."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving u."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """Return vertices in reverse-post-order; meaningful for acyclic graphs only."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return vertices that participate in (or lead into) a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a visited neighbor that is not the direct parent but is an
                    # indirect ancestor closes a cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a back-edge (cycle) is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by a DFS run."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by a BFS run."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class A_:
    """Weighted undirected graph stored as an adjacency list.

    Every edge is mirrored: ``add_pair(u, v, w)`` stores ``[w, v]`` under ``u``
    and ``[w, u]`` under ``v``. Restored from mangled source (duplicate method
    names, unbound locals).
    """

    def __init__(self):
        # vertex -> list of [weight, neighbor] edges
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge u -- v with weight w (duplicates ignored)."""
        # check if u exists
        if self.graph.get(u):
            # skip if the edge is already there
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge u -- v (both directions) if present."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from s (default: first vertex); stops once d is reached."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random size if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from s (default: first vertex); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to u."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return vertices that participate in (or lead into) a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # visited neighbor that is not the direct parent but an
                    # indirect ancestor closes a cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a cycle is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every vertex of the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by a DFS run."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by a BFS run."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 78 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download the LAVIS demo image (merlion) and return it as an RGB PIL image.

    Restored: the mangled source referenced an undefined name for
    ``stream=`` (should be True) and never bound the result variables.
    """
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    """Build (old_name, new_name) pairs mapping LAVIS BLIP-2 vision-encoder and
    Q-Former checkpoint keys to their HF equivalents.

    Args:
        config: model config; only ``config.vision_config.num_hidden_layers`` is read.

    Returns:
        list[tuple[str, str]]: rename pairs consumed by ``rename_key``.
    """
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding'))
    rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding'))
    rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight'))
    rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias'))
    rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight'))
    rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias'))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight'))
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias'))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Restored: the mangled source popped the old key but never stored the
    value under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Merge the separate q/v attention biases of each vision block into a
    single qkv bias (k bias is implicitly zero) under the HF key name.

    Mutates ``state_dict`` in place: pops the LAVIS ``q_bias``/``v_bias``
    entries and writes ``...self_attn.qkv.bias``.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict: [q | zeros(k) | v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    """Assemble the HF config for a given BLIP-2 variant.

    Args:
        model_name: checkpoint variant name (decides text backbone and image size).
        eos_token_id: forwarded to the OPT text config (restored: the caller
            passes this keyword but the mangled signature lacked it).

    Returns:
        (config, image_size) tuple.
    """
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    else:
        # robustness fix: the original fell through with text_config unbound
        raise ValueError(f"Model name {model_name} not supported")

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original LAVIS BLIP-2 checkpoint to the HF format.

    Loads the original model, renames state-dict keys, verifies logits and
    generation against the original, then optionally saves and/or pushes the
    converted model. Restored from mangled source: parameter and local names
    were replaced by placeholders while the body still used the real names.
    """
    # tokenizer depends on the text backbone (OPT vs. flan-T5)
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device)
    original_model.eval()
    print('Done!')

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print('Looks ok!')

    print('Generating a caption...')
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)

    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # CLI entry point: pick a BLIP-2 variant, convert, optionally save/push.
    # Restored: the guard body had lost its indentation and every assignment
    # target, so `choices` and `args` were never bound.
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 78 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 78 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Deferred-import module map: keys are submodule names, values the public
# symbols they export. `_LazyModule` imports each submodule on first access.
# Restored: the mangled source rebound a single placeholder name repeatedly
# and never defined the `_import_structure` referenced at the bottom.
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

# Fast tokenizer and PyTorch models are optional; only register them when
# their backend dependency is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that resolves symbols
    # from _import_structure on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 78 |
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    """Split ``string`` on ``separator`` (default: space) without using str.split.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    """
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            # close the current word and start the next one after the separator
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            # final character: flush the trailing word
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 78 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
snake_case_ = logging.get_logger(__name__)
class A_(DeformableDetrImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Subclasses the image processor unchanged and emits a FutureWarning on
    construction. Restored: the base class and warning category names had
    been mangled into undefined placeholders.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 78 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging once at import time: timestamped, level-tagged records
# that include the PID (useful when this module runs under multiple training
# worker processes).
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
    datefmt="""%m/%d/%Y %H:%M:%S""",
    level=logging.INFO,
)
# NOTE(review): assignment target looks machine-mangled — later code in this
# module calls `logger.info(...)`, so this was presumably
# `logger = logging.getLogger(__name__)`; verify before relying on it.
snake_case_ = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Dump the current git commit info (repo id, HEAD sha, active branch)
    to ``<folder_path>/git_log.json`` for experiment reproducibility.

    Restored: the mangled source never bound ``repo`` or the info dict and
    passed an undefined name to ``search_parent_directories``.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }

    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setup.

    Mutates ``params`` in place: fills rank/world-size fields from the
    environment in distributed mode, or with trivial defaults for CPU /
    single-GPU runs, then initializes torch.distributed if needed.
    """
    if params.n_gpu <= 0:
        # CPU-only run: trivial topology, this process is the master.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1

        # distributed launchers export these
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID        : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank     : %i' % params.local_rank)
    logger.info(PREFIX + 'World size     : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node  : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master         : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node     : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU      : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname       : %s' % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(
            init_method='env://', backend='nccl', )
def set_seed(args):
    """Seed numpy and torch (and all CUDA devices when GPUs are used) from
    ``args.seed`` for reproducible runs.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 78 | 1 |
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    """The mock filesystem fixture registers its protocol alongside builtins."""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    """Without the fixture, only the built-in protocols are registered."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    """extract_path_from_uri strips the remote scheme but leaves local paths alone."""
    mock_bucket = 'mock-s3-bucket'
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith('s3://') is False

    dataset_path = './local/path'
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    """The mock filesystem is remote; the local 'file' filesystem is not."""
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem('file')
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize('compression_fs_class', COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file):
    """Each compression filesystem exposes the archived file and decompresses
    it back to the original text (fixtures are pytest-injected by name)."""
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # optional backend not installed for this protocol: skip with its reason
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    # the filesystem exposes the archive member without its compression suffix
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex('.')]
    assert fs.glob('*') == [expected_filename]
    with fs.open(expected_filename, 'r', encoding='utf-8') as f, open(text_file, encoding='utf-8') as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol', ['zip', 'gzip'])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """Chained-URL filesystems (`proto://member::archive`) see the member file."""
    compressed_file_paths = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = 'dataset.jsonl'
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile('non_existing_' + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    """HfFileSystem lists, stats and reads files of a private Hub dataset repo."""
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob('*')) == [".gitattributes", "data"]
    assert hffs.isdir('data')
    assert hffs.isfile('.gitattributes') and hffs.isfile('data/text_data.txt')
    with open(text_file) as f:
        assert hffs.open('data/text_data.txt', 'r').read() == f.read()
def test_fs_overwrites():
    """Re-registering an already-set protocol makes the module reload warn once."""
    protocol = 'bz2'

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 78 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE(review): this script's identifiers appear machine-mangled — every
# top-level assignment target became `snake_case_`, so each assignment below
# clobbers the previous one and the later references (sess_opt,
# execution_provider, sess, run_opt, sequence, batch, input_ids,
# attention_mask, token_type_ids, start_time, max_iters) are never bound.
# The real names must be restored before this benchmark can run. The first
# three values ("1"/"0"/"1") are presumably ORT/TensorRT environment-variable
# settings — TODO confirm against the original script. `np.intaa` also looks
# like a mangled dtype (presumably np.int64) — verify.
snake_case_ = """1"""
snake_case_ = """0"""
snake_case_ = """1"""
# Session options with all graph optimizations disabled (raw backend timing).
snake_case_ = ort.SessionOptions()
snake_case_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

print("""Create inference session...""")
# Prefer the TensorRT execution provider, fall back to CUDA.
snake_case_ = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
snake_case_ = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
snake_case_ = ort.RunOptions()

# Dummy BERT-style inputs: batch of 1, sequence length 128, all ones.
snake_case_ = 128
snake_case_ = 1
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)

# One warm-up run so lazy initialization does not pollute the timing loop.
print("""Warm up phase...""")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

# Timed loop: report average latency in milliseconds over max_iters runs.
print("""Start inference...""")
snake_case_ = time.time()
snake_case_ = 2000
snake_case_ = {}
for iter in range(max_iters):
    snake_case_ = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
| 78 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    """Learned (or absent) classifier-free-guidance embeddings for VQ-Diffusion.

    When ``learnable`` is True, holds a trainable ``(length, hidden_size)``
    embedding table; otherwise the parameter is created empty (``Parameter``
    accepts ``data=None``) and the pipeline falls back to encoding "" prompts.
    """

    @register_to_config
    def __init__( self , learnable : bool , hidden_size : Optional[int] = None , length : Optional[int] = None ) -> None:
        # Fix: the original signature declared three parameters all named
        # `lowercase_` (a SyntaxError) and never assigned the attributes.
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """VQ-Diffusion text-to-image pipeline (CLIP text encoder + discrete-latent
    transformer + VQ-VAE decoder).

    NOTE(review): this block has been identifier-mangled. Method parameters are
    all named ``lowercase_`` (duplicate parameter names — a SyntaxError) and
    results are assigned to the throwaway name ``UpperCAmelCase`` instead of
    the locals the later code reads (``text_inputs``, ``prompt_embeds``,
    ``batch_size``, ...). The structure matches diffusers'
    ``VQDiffusionPipeline``; comments below describe the intended behavior.
    """

    # NOTE(review): originally six distinct annotated attributes
    # (vqvae/text_encoder/tokenizer/transformer/scheduler/learned embeddings);
    # mangling collapsed them into one repeated name.
    __UpperCamelCase = 42
    __UpperCamelCase = 42
    __UpperCamelCase = 42
    __UpperCamelCase = 42
    __UpperCamelCase = 42
    __UpperCamelCase = 42

    def __init__( self :Union[str, Any] , lowercase_ :VQModel , lowercase_ :CLIPTextModel , lowercase_ :CLIPTokenizer , lowercase_ :TransformeraDModel , lowercase_ :VQDiffusionScheduler , lowercase_ :LearnedClassifierFreeSamplingEmbeddings , ) -> Union[str, Any]:
        """Register all sub-models with the DiffusionPipeline machinery."""
        super().__init__()
        self.register_modules(
            vqvae=lowercase_ , transformer=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , scheduler=lowercase_ , learned_classifier_free_sampling_embeddings=lowercase_ , )

    def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[Any] , lowercase_ :str , lowercase_ :Optional[Any] ) -> Union[str, Any]:
        """Encode the prompt(s) with CLIP; returns L2-normalized embeddings,
        duplicated per image and (optionally) concatenated with unconditional
        embeddings for classifier-free guidance."""
        UpperCAmelCase = len(lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else 1
        # get prompt text embeddings
        UpperCAmelCase = self.tokenizer(
            lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        UpperCAmelCase = text_inputs.input_ids
        # Warn about (and drop) tokens beyond CLIP's max sequence length.
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
        UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        UpperCAmelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowercase_ )
        # duplicate text embeddings for each generation per prompt
        UpperCAmelCase = prompt_embeds.repeat_interleave(lowercase_ , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # Use the learned unconditional embeddings, broadcast to batch.
                UpperCAmelCase = self.learned_classifier_free_sampling_embeddings.embeddings
                UpperCAmelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(lowercase_ , 1 , 1 )
            else:
                # Encode empty-string prompts as the unconditional branch.
                UpperCAmelCase = [''] * batch_size
                UpperCAmelCase = text_input_ids.shape[-1]
                UpperCAmelCase = self.tokenizer(
                    lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
                UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                UpperCAmelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowercase_ )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            UpperCAmelCase = negative_prompt_embeds.shape[1]
            UpperCAmelCase = negative_prompt_embeds.repeat(1 , lowercase_ , 1 )
            UpperCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase_ , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds

    @torch.no_grad()
    def __call__( self :str , lowercase_ :Union[str, List[str]] , lowercase_ :int = 1_00 , lowercase_ :float = 5.0 , lowercase_ :float = 1.0 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[torch.FloatTensor] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , lowercase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ :int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the full sampling loop: start from fully-masked discrete latents,
        iteratively denoise with the transformer (truncating the predicted
        distribution each step), then decode through the VQ-VAE."""
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = 1
        elif isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = len(lowercase_ )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}""" )
        UpperCAmelCase = batch_size * num_images_per_prompt
        # guidance_scale > 1 enables classifier-free guidance.
        UpperCAmelCase = guidance_scale > 1.0
        UpperCAmelCase = self._encode_prompt(lowercase_ , lowercase_ , lowercase_ )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(lowercase_ )}.""" )
        # get the initial completely masked latents unless the user supplied it
        UpperCAmelCase = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The highest embedding index is the "masked" token.
            UpperCAmelCase = self.transformer.num_vector_embeds - 1
            UpperCAmelCase = torch.full(lowercase_ , lowercase_ ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
            UpperCAmelCase = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(lowercase_ , device=self.device )
        UpperCAmelCase = self.scheduler.timesteps.to(self.device )
        UpperCAmelCase = latents
        for i, t in enumerate(self.progress_bar(lowercase_ ) ):
            # expand the sample if we are doing classifier free guidance
            UpperCAmelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            UpperCAmelCase = self.transformer(lowercase_ , encoder_hidden_states=lowercase_ , timestep=lowercase_ ).sample
            if do_classifier_free_guidance:
                # Combine unconditional/conditional halves and renormalize in log space.
                UpperCAmelCase , UpperCAmelCase = model_output.chunk(2 )
                UpperCAmelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(lowercase_ , dim=1 , keepdim=lowercase_ )
            UpperCAmelCase = self.truncate(lowercase_ , lowercase_ )
            # remove `log(0)`'s (`-inf`s)
            UpperCAmelCase = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase = self.scheduler.step(lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowercase_ , lowercase_ , lowercase_ )
        # Decode the final discrete latents through the VQ-VAE codebook.
        UpperCAmelCase = self.vqvae.config.vq_embed_dim
        UpperCAmelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        UpperCAmelCase = self.vqvae.quantize.get_codebook_entry(lowercase_ , shape=lowercase_ )
        UpperCAmelCase = self.vqvae.decode(lowercase_ , force_not_quantize=lowercase_ ).sample
        # Map from [-1, 1] to [0, 1] and to HWC numpy for output.
        UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCAmelCase = self.numpy_to_pil(lowercase_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowercase_ )

    def UpperCAmelCase__ ( self :int , lowercase_ :torch.FloatTensor , lowercase_ :float ) -> torch.FloatTensor:
        """Truncate the predicted log-probabilities: zero out (set to -inf) all
        classes outside the smallest set whose cumulative probability reaches
        ``truncation_rate``, always keeping at least the argmax class."""
        UpperCAmelCase , UpperCAmelCase = torch.sort(lowercase_ , 1 , descending=lowercase_ )
        UpperCAmelCase = torch.exp(lowercase_ )
        UpperCAmelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        UpperCAmelCase = torch.full_like(keep_mask[:, 0:1, :] , lowercase_ )
        UpperCAmelCase = torch.cat((all_true, keep_mask) , dim=1 )
        UpperCAmelCase = keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original class order.
        UpperCAmelCase = keep_mask.gather(1 , indices.argsort(1 ) )
        UpperCAmelCase = log_p_x_0.clone()
        UpperCAmelCase = -torch.inf  # -inf = log(0)
        return rv
| 78 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case_ = logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """BLIP-style image processor: optional RGB conversion, resize to a fixed
    height/width, rescale by 1/255, and normalization with the OpenAI CLIP
    mean/std.

    NOTE(review): identifier-mangled — every method declares all parameters as
    ``lowercase_`` (duplicate parameter names are a SyntaxError) while the
    bodies read the intended names (``size``, ``do_resize``, ...), and results
    are assigned to the throwaway ``UpperCAmelCase``. Comments describe the
    intended behavior.
    """

    # The model input produced by this processor.
    __UpperCamelCase = ["""pixel_values"""]

    def __init__( self :int , lowercase_ :bool = True , lowercase_ :Dict[str, int] = None , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :bool = True , lowercase_ :Union[int, float] = 1 / 2_55 , lowercase_ :bool = True , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :bool = True , **lowercase_ :Union[str, Any] , ) -> None:
        """Store the default preprocessing configuration (384x384, CLIP stats)."""
        super().__init__(**lowercase_ )
        UpperCAmelCase = size if size is not None else {'height': 3_84, 'width': 3_84}
        UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        UpperCAmelCase = do_resize
        UpperCAmelCase = size
        UpperCAmelCase = resample
        UpperCAmelCase = do_rescale
        UpperCAmelCase = rescale_factor
        UpperCAmelCase = do_normalize
        UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
        UpperCAmelCase = do_convert_rgb

    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :np.ndarray , lowercase_ :Dict[str, int] , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Any , ) -> np.ndarray:
        """Resize one image to exactly ``size['height'] x size['width']``."""
        UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        UpperCAmelCase = (size['height'], size['width'])
        return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :np.ndarray , lowercase_ :Union[int, float] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[int] , ) -> int:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def UpperCAmelCase__ ( self :Any , lowercase_ :np.ndarray , lowercase_ :Union[float, List[float]] , lowercase_ :Union[float, List[float]] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[Any] , ) -> np.ndarray:
        """Channel-wise normalize: ``(image - mean) / std``."""
        return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :ImageInput , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Dict[str, int]] = None , lowercase_ :PILImageResampling = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[float] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[str, TensorType]] = None , lowercase_ :bool = None , lowercase_ :ChannelDimension = ChannelDimension.FIRST , **lowercase_ :Tuple , ) -> PIL.Image.Image:
        """Full preprocessing pipeline; per-call arguments override the stored
        defaults, and the result is returned as a BatchFeature."""
        UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase = resample if resample is not None else self.resample
        UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase = image_std if image_std is not None else self.image_std
        UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        UpperCAmelCase = size if size is not None else self.size
        UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        UpperCAmelCase = make_list_of_images(lowercase_ )
        if not valid_images(lowercase_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        # NOTE(review): precedence bug — parses as `(do_resize and size is None)
        # or resample is None`, so a None resample raises even when do_resize
        # is False. Intended: `do_resize and (size is None or resample is None)`.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            UpperCAmelCase = [convert_to_rgb(lowercase_ ) for image in images]
        # All transformations expect numpy arrays.
        UpperCAmelCase = [to_numpy_array(lowercase_ ) for image in images]
        if do_resize:
            UpperCAmelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
        if do_rescale:
            UpperCAmelCase = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
        if do_normalize:
            UpperCAmelCase = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
        UpperCAmelCase = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
        UpperCAmelCase = BatchFeature(data={'pixel_values': images} , tensor_type=lowercase_ )
        return encoded_outputs
| 78 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ = 0 , lowercase_ = 0 ):
UpperCAmelCase = right or len(lowercase_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(lowercase_ , lowercase_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration for BEiT-style vision models (backbone + optional
    UPerNet-style semantic-segmentation heads).

    Fixes vs. the original block: the ``__init__`` signature declared every
    parameter as ``lowercase_`` (duplicate names — a SyntaxError) while the
    body read the real names; parameter names are restored 1:1 from the
    body's assignment order, and each is now stored on ``self``.
    """

    __UpperCamelCase = """beit"""

    def __init__(
        self ,
        vocab_size=81_92 ,
        hidden_size=7_68 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=30_72 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        image_size=2_24 ,
        patch_size=16 ,
        num_channels=3 ,
        use_mask_token=False ,
        use_absolute_position_embeddings=False ,
        use_relative_position_bias=False ,
        use_shared_relative_position_bias=False ,
        layer_scale_init_value=0.1 ,
        drop_path_rate=0.1 ,
        use_mean_pooling=True ,
        # NOTE: mutable list defaults kept for interface compatibility with the
        # original signature; callers must not mutate them in place.
        out_indices=[3, 5, 7, 11] ,
        pool_scales=[1, 2, 3, 6] ,
        use_auxiliary_head=True ,
        auxiliary_loss_weight=0.4 ,
        auxiliary_channels=2_56 ,
        auxiliary_num_convs=1 ,
        auxiliary_concat_input=False ,
        semantic_loss_ignore_index=2_55 ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for BEiT (pins the opset baseline and the
    input axes / validation tolerance).

    NOTE(review): mangling gave BOTH properties the same name
    ``UpperCAmelCase__`` — the second definition (the 1e-4 tolerance) shadows
    the first (the input-axes mapping), so the inputs mapping is unreachable.
    Originally these were distinct properties (`inputs` and
    `atol_for_validation`).
    """

    # Minimum torch/opset baseline for export.
    __UpperCamelCase = version.parse("""1.11""" )

    @property
    def UpperCAmelCase__ ( self :Dict ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis names for the single `pixel_values` input.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def UpperCAmelCase__ ( self :Tuple ) -> float:
        # Absolute tolerance used when validating exported-model outputs.
        return 1E-4
| 78 | 1 |
"""simple docstring"""
from PIL import Image
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = (259 * (level + 255)) / (255 * (259 - level))
def contrast(lowercase_ ) -> int:
return int(128 + factor * (c - 128) )
return img.point(lowercase_ )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
snake_case_ = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 78 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import table for the LongT5 model family. Fixes vs. the original
# block: `_import_structure` was referenced at the bottom but never defined
# (the dict and the backend-conditional lists were all discarded into
# `snake_case_`), and the lazy module was assigned to a variable instead of
# being installed into `sys.modules`.
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch is available: expose the torch implementations.
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax is available: expose the flax implementations.
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; module/class names match the
    # strings declared in `_import_structure` above.
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 78 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import table for the Data2Vec model family. Fixes vs. the original
# block: `_import_structure` was referenced at the bottom but never defined
# (the dict and all backend-conditional lists were discarded into
# `snake_case_`), and the lazy module was assigned to a variable instead of
# being installed into `sys.modules`.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch is available: expose the torch implementations.
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    # TensorFlow is available: expose the TF vision implementations.
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; module/class names match the
    # strings declared in `_import_structure` above.
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(R'^(.*)_\d+\.jpg$' , lowercase_ ).groups()[0]
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Dataset of pet images: loads a JPEG, converts to RGB, applies an
    optional transform, and returns ``{"image": ..., "label": ...}`` where the
    label is extracted from the filename (optionally mapped to an id).

    NOTE(review): identifier-mangled — ``__init__`` declares duplicate
    ``lowercase_`` parameters (a SyntaxError), attributes are assigned to the
    throwaway ``UpperCAmelCase`` instead of ``self.file_names`` etc., and
    ``__getitem__`` reads locals (``idx``, ``raw_image``, ``image``,
    ``label``) that were never bound. ``extract_label`` refers to the
    module-level filename-parsing helper, which mangling renamed.
    """

    def __init__( self :List[str] , lowercase_ :Dict , lowercase_ :List[str]=None , lowercase_ :Optional[Any]=None ) -> Optional[int]:
        # Intended: store file_names, image_transform, label_to_id on self.
        UpperCAmelCase = file_names
        UpperCAmelCase = image_transform
        UpperCAmelCase = label_to_id

    def __len__( self :Optional[int] ) -> Optional[Any]:
        # Dataset length == number of image files.
        return len(self.file_names )

    def __getitem__( self :int , lowercase_ :str ) -> List[str]:
        UpperCAmelCase = self.file_names[idx]
        # Decode and force 3-channel RGB (drops alpha / palette modes).
        UpperCAmelCase = PIL.Image.open(lowercase_ )
        UpperCAmelCase = raw_image.convert('RGB' )
        if self.image_transform is not None:
            UpperCAmelCase = self.image_transform(lowercase_ )
        # Label comes from the filename; map to an integer id when available.
        UpperCAmelCase = extract_label(lowercase_ )
        if self.label_to_id is not None:
            UpperCAmelCase = self.label_to_id[label]
        return {"image": image, "label": label}
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
    """Fine-tune a frozen resnet50d classifier head on the pets dataset with
    🤗 Accelerate: builds train/eval splits from JPEG filenames, trains with
    Adam + OneCycleLR, checkpoints per step or per epoch, and optionally logs
    to experiment trackers.

    Intended parameters (mangled to ``lowercase_``): ``config`` — the
    hyper-parameter dict (lr/num_epochs/seed/batch_size/image_size) — and
    ``args`` — the parsed CLI namespace.

    NOTE(review): identifier-mangled throughout — every result is bound to the
    throwaway ``UpperCAmelCase`` while later lines read the intended names
    (``accelerator``, ``lr``, ``file_names``, ``model``, ...), and call
    arguments were replaced by the unbound ``lowercase_``. The comments below
    describe the intended flow.
    """
    # Initialize accelerator
    if args.with_tracking:
        UpperCAmelCase = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
    else:
        UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    UpperCAmelCase = config['lr']
    UpperCAmelCase = int(config['num_epochs'] )
    UpperCAmelCase = int(config['seed'] )
    UpperCAmelCase = int(config['batch_size'] )
    UpperCAmelCase = config['image_size']
    # Allow a single int image_size; normalize to a (h, w) tuple.
    if not isinstance(lowercase_ , (list, tuple) ):
        UpperCAmelCase = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps , 'isdigit' ):
        if args.checkpointing_steps == "epoch":
            UpperCAmelCase = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            UpperCAmelCase = int(args.checkpointing_steps )
        else:
            raise ValueError(
                F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
    else:
        UpperCAmelCase = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        UpperCAmelCase = os.path.split(lowercase_ )[-1].split('.' )[0]
        accelerator.init_trackers(lowercase_ , lowercase_ )
    # Grab all the image filenames
    UpperCAmelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
    # Build the label correspondences
    UpperCAmelCase = [extract_label(lowercase_ ) for fname in file_names]
    UpperCAmelCase = list(set(lowercase_ ) )
    id_to_label.sort()
    UpperCAmelCase = {lbl: i for i, lbl in enumerate(lowercase_ )}
    # Set the seed before splitting the data.
    np.random.seed(lowercase_ )
    torch.manual_seed(lowercase_ )
    torch.cuda.manual_seed_all(lowercase_ )
    # Split our filenames between train and validation
    UpperCAmelCase = np.random.permutation(len(lowercase_ ) )
    UpperCAmelCase = int(0.8 * len(lowercase_ ) )
    UpperCAmelCase = random_perm[:cut]
    UpperCAmelCase = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    UpperCAmelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] )
    UpperCAmelCase = PetsDataset(
        [file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
    # For evaluation, we use a deterministic Resize
    UpperCAmelCase = Compose([Resize(lowercase_ ), ToTensor()] )
    UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
    # Instantiate dataloaders.
    UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
    UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    UpperCAmelCase = create_model('resnet50d' , pretrained=lowercase_ , num_classes=len(lowercase_ ) )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    UpperCAmelCase = model.to(accelerator.device )
    # Freezing the base model
    # NOTE(review): intended `param.requires_grad = False/True`; mangling
    # replaced the attribute assignment with a throwaway binding.
    for param in model.parameters():
        UpperCAmelCase = False
    for param in model.get_classifier().parameters():
        UpperCAmelCase = True
    # We normalize the batches of images to be a bit faster.
    UpperCAmelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
    UpperCAmelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
    # Instantiate optimizer
    UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
    # Instantiate learning rate scheduler
    UpperCAmelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
    # We need to keep track of how many total steps we have iterated over
    UpperCAmelCase = 0
    # We also need to keep track of the starting epoch so files are named properly
    UpperCAmelCase = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
            accelerator.load_state(args.resume_from_checkpoint )
            UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
        else:
            # Get the most recent checkpoint
            UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
            dirs.sort(key=os.path.getctime )
            UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        UpperCAmelCase = os.path.splitext(lowercase_ )[0]
        if "epoch" in training_difference:
            UpperCAmelCase = int(training_difference.replace('epoch_' , '' ) ) + 1
            UpperCAmelCase = None
        else:
            UpperCAmelCase = int(training_difference.replace('step_' , '' ) )
            UpperCAmelCase = resume_step // len(lowercase_ )
            resume_step -= starting_epoch * len(lowercase_ )
    # Now we train the model
    for epoch in range(lowercase_ , lowercase_ ):
        model.train()
        if args.with_tracking:
            UpperCAmelCase = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            UpperCAmelCase = accelerator.skip_first_batches(lowercase_ , lowercase_ )
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            UpperCAmelCase = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
            UpperCAmelCase = (batch['image'] - mean) / std
            UpperCAmelCase = model(lowercase_ )
            UpperCAmelCase = torch.nn.functional.cross_entropy(lowercase_ , batch['label'] )
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(lowercase_ )
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            # Step-based checkpointing (when --checkpointing_steps is numeric).
            if isinstance(lowercase_ , lowercase_ ):
                UpperCAmelCase = F"""step_{overall_step}"""
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        UpperCAmelCase = os.path.join(args.output_dir , lowercase_ )
                        accelerator.save_state(lowercase_ )
        # Evaluation: accuracy over the held-out split.
        model.eval()
        UpperCAmelCase = 0
        UpperCAmelCase = 0
        for step, batch in enumerate(lowercase_ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
            UpperCAmelCase = (batch['image'] - mean) / std
            with torch.no_grad():
                UpperCAmelCase = model(lowercase_ )
            UpperCAmelCase = outputs.argmax(dim=-1 )
            # Gather predictions/labels across processes before scoring.
            UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['label']) )
            UpperCAmelCase = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        UpperCAmelCase = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': 100 * eval_metric,
                    'train_loss': total_loss.item() / len(lowercase_ ),
                    'epoch': epoch,
                } , step=lowercase_ , )
        # Epoch-based checkpointing (when --checkpointing_steps == "epoch").
        if checkpointing_steps == "epoch":
            UpperCAmelCase = F"""epoch_{epoch}"""
            if args.output_dir is not None:
                UpperCAmelCase = os.path.join(args.output_dir , lowercase_ )
                accelerator.save_state(lowercase_ )
    if args.with_tracking:
        accelerator.end_training()
def _lowerCAmelCase ( ):
    """Parse command-line arguments, assemble the hyper-parameter config and launch training.

    Side effects: reads ``sys.argv`` via argparse and calls ``training_function``
    (defined earlier in this module).
    """
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    # Fix: the keyword values below were the undefined name `lowercase_`; restore
    # the obvious literals (required flag, str types, None defaults).
    parser.add_argument('--data_dir' , required=True , help='The data folder on disk.' )
    parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    parser.add_argument(
        '--checkpointing_steps' , type=str , default=None , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--resume_from_checkpoint' , type=str , default=None , help='If the training should continue from a checkpoint folder.' , )
    parser.add_argument(
        '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
    parser.add_argument(
        '--project_dir' , type=str , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
    # Fix: the parsed namespace and the config dict were previously collapsed into
    # one reused variable, and the call below passed an undefined name.
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config , args )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module — the launcher above is
    # named `_lowerCAmelCase` — so this call raises NameError at runtime.
    # TODO confirm the intended entry-point name.
    main()
| 78 | 1 |
"""simple docstring"""
from collections import deque
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = len(lowercase_ )
UpperCAmelCase = deque()
UpperCAmelCase = [False for _ in range(lowercase_ )]
UpperCAmelCase = [-1 for _ in range(lowercase_ )]
UpperCAmelCase = index_of[:]
def strong_connect(lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = index # the number when this node is seen
UpperCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(lowercase_ )
UpperCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
UpperCAmelCase = strong_connect(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
UpperCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
UpperCAmelCase = []
UpperCAmelCase = stack.pop()
UpperCAmelCase = False
component.append(lowercase_ )
while w != v:
UpperCAmelCase = stack.pop()
UpperCAmelCase = False
component.append(lowercase_ )
components.append(lowercase_ )
return index
UpperCAmelCase = []
for v in range(lowercase_ ):
if index_of[v] == -1:
strong_connect(lowercase_ , 0 , lowercase_ )
return components
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = [[] for _ in range(lowercase_ )]
for u, v in edges:
g[u].append(lowercase_ )
return g
if __name__ == "__main__":
    # Test
    # NOTE(review): every assignment below rebinds the same name `snake_case_`,
    # so only the last survives; presumably these were distinct variables
    # (n_vertices, source, target, edges, g) originally. The expressions also
    # read `source`, `target`, `n_vertices`, `edges`, `g` and call `tarjan` /
    # `create_graph`, none of which are defined under those names in this
    # module — as written this block raises NameError. TODO confirm names.
    snake_case_ = 7
    snake_case_ = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    snake_case_ = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    snake_case_ = [(u, v) for u, v in zip(source, target)]
    snake_case_ = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 78 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = list(range(len(lowercase_ ) ) )
UpperCAmelCase = [v / w for v, w in zip(lowercase_ , lowercase_ )]
index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ )
UpperCAmelCase = 0
UpperCAmelCase = [0] * len(lowercase_ )
for i in index:
if weight[i] <= capacity:
UpperCAmelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
    import doctest

    # Run any doctests in this module when executed as a script.
    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {"""vocab_file""": """vocab.txt"""}
snake_case_ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
snake_case_ = {
"""facebook/esm2_t6_8M_UR50D""": 1024,
"""facebook/esm2_t12_35M_UR50D""": 1024,
}
def _lowerCAmelCase ( lowercase_ ):
with open(lowercase_ , 'r' ) as f:
UpperCAmelCase = f.read().splitlines()
return [l.strip() for l in lines]
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Protein tokenizer backed by a flat ``vocab.txt`` (one token per line).

    NOTE(review): this class is heavily mangled — the module constants it reads
    (``VOCAB_FILES_NAMES`` etc.) are bound above under the name ``snake_case_``;
    the four class attributes below all rebind ``__UpperCamelCase`` so only the
    last survives; every method is named ``UpperCAmelCase__`` (each shadows the
    previous); and several signatures repeat the parameter name ``lowercase_``,
    which is a SyntaxError. The original attribute/method names must be
    restored before this class can work — confirm against the upstream source.
    """

    __UpperCamelCase = VOCAB_FILES_NAMES
    __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase = ["""input_ids""", """attention_mask"""]
    # NOTE(review): the assignments in __init__ below are collapsed onto one
    # throwaway name; later reads of self.all_tokens / self._id_to_token /
    # self._token_to_id suggest they were instance-attribute assignments.
    def __init__( self :str , lowercase_ :Optional[int] , lowercase_ :List[Any]="<unk>" , lowercase_ :Tuple="<cls>" , lowercase_ :Union[str, Any]="<pad>" , lowercase_ :Tuple="<mask>" , lowercase_ :str="<eos>" , **lowercase_ :List[Any] , ) -> List[Any]:
        super().__init__(**lowercase_ )
        UpperCAmelCase = load_vocab_file(lowercase_ )
        UpperCAmelCase = dict(enumerate(self.all_tokens ) )
        UpperCAmelCase = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        UpperCAmelCase = unk_token
        UpperCAmelCase = cls_token
        UpperCAmelCase = pad_token
        UpperCAmelCase = mask_token
        UpperCAmelCase = eos_token
        UpperCAmelCase = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    # id -> token, falling back to the unknown token.
    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :int ) -> str:
        return self._id_to_token.get(lowercase_ , self.unk_token )
    # token -> id, falling back to the unknown token's id.
    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :str ) -> int:
        return self._token_to_id.get(lowercase_ , self._token_to_id.get(self.unk_token ) )
    # Tokenization is plain whitespace splitting.
    # NOTE(review): `text` is undefined here — the parameter is `lowercase_`.
    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :int , **lowercase_ :Optional[Any] ) -> int:
        return text.split()
    def UpperCAmelCase__ ( self :str , lowercase_ :List[str]=False ) -> Union[str, Any]:
        return len(self._id_to_token )
    def UpperCAmelCase__ ( self :List[str] ) -> Optional[int]:
        return {token: i for i, token in enumerate(self.all_tokens )}
    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str ) -> int:
        return self._token_to_id.get(lowercase_ , self._token_to_id.get(self.unk_token ) )
    def UpperCAmelCase__ ( self :int , lowercase_ :int ) -> str:
        return self._id_to_token.get(lowercase_ , self.unk_token )
    # Prepend CLS / append EOS around one or two sequences.
    def UpperCAmelCase__ ( self :int , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
        UpperCAmelCase = [self.cls_token_id]
        UpperCAmelCase = [self.eos_token_id] # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
        return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
    # 1 marks special tokens, 0 marks sequence tokens.
    def UpperCAmelCase__ ( self :str , lowercase_ :List , lowercase_ :Optional[List] = None , lowercase_ :bool = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        UpperCAmelCase = [1] + ([0] * len(lowercase_ )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(lowercase_ ) + [1]
        return mask
    # Write the vocabulary back out, one token per line.
    def UpperCAmelCase__ ( self :Any , lowercase_ :List[Any] , lowercase_ :int ) -> int:
        UpperCAmelCase = os.path.join(lowercase_ , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
        with open(lowercase_ , 'w' ) as f:
            f.write('\n'.join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def UpperCAmelCase__ ( self :Any ) -> int:
        return self.get_vocab_size(with_added_tokens=lowercase_ )
    def UpperCAmelCase__ ( self :List[str] , lowercase_ :Union[List[str], List[AddedToken]] , lowercase_ :bool = False ) -> int:
        return super()._add_tokens(lowercase_ , special_tokens=lowercase_ )
| 78 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Visual-question-answering pipeline: answers a free-text question about an image.

    NOTE(review): several signatures below repeat the parameter name
    ``lowercase_`` (a SyntaxError), methods are all named ``UpperCAmelCase__``
    (each definition shadows the previous), and distinct locals are collapsed
    into one name. The original pipeline method names (_sanitize_parameters,
    preprocess, _forward, postprocess) must be restored — confirm upstream.
    """

    def __init__( self :Any , *lowercase_ :str , **lowercase_ :List[Any] ) -> Union[str, Any]:
        super().__init__(*lowercase_ , **lowercase_ )
        # NOTE(review): presumably this should check against
        # MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING (imported above).
        self.check_model_type(lowercase_ )
    # Split caller kwargs into preprocess / forward / postprocess params.
    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any=None , lowercase_ :Optional[int]=None , lowercase_ :Tuple=None , **lowercase_ :Tuple ) -> Dict:
        UpperCAmelCase , UpperCAmelCase = {}, {}
        if padding is not None:
            UpperCAmelCase = padding
        if truncation is not None:
            UpperCAmelCase = truncation
        if top_k is not None:
            UpperCAmelCase = top_k
        return preprocess_params, {}, postprocess_params
    # Accept either (image, question) or a dict/list-of-dicts payload.
    def __call__( self :List[Any] , lowercase_ :Union["Image.Image", str] , lowercase_ :str = None , **lowercase_ :Union[str, Any] ) -> Union[str, Any]:
        if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = {'image': image, 'question': question}
        else:
            UpperCAmelCase = image
        UpperCAmelCase = super().__call__(lowercase_ , **lowercase_ )
        return results
    # Tokenize the question and featurize the image into one model-input dict.
    def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[Any] , lowercase_ :int=False , lowercase_ :Optional[int]=False ) -> Union[str, Any]:
        UpperCAmelCase = load_image(inputs['image'] )
        UpperCAmelCase = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ )
        UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=self.framework )
        model_inputs.update(lowercase_ )
        return model_inputs
    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :List[str] ) -> Any:
        UpperCAmelCase = self.model(**lowercase_ )
        return model_outputs
    # Convert logits to the top-k (score, answer) pairs.
    def UpperCAmelCase__ ( self :Dict , lowercase_ :Tuple , lowercase_ :List[Any]=5 ) -> Union[str, Any]:
        if top_k > self.model.config.num_labels:
            UpperCAmelCase = self.model.config.num_labels
        if self.framework == "pt":
            UpperCAmelCase = model_outputs.logits.sigmoid()[0]
            UpperCAmelCase , UpperCAmelCase = probs.topk(lowercase_ )
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        UpperCAmelCase = scores.tolist()
        UpperCAmelCase = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
| 78 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the Bloom sub-package: sub-module name -> exported
# symbols. Fix: the dict and the conditional additions below were previously
# bound to a throwaway module name, so optional exports were never registered.
_import_structure = {
    "configuration_bloom": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Tokenizers backend missing: simply skip registering the fast tokenizer.
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["""BloomTokenizerFast"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch missing: skip registering the modeling classes.
    pass
else:
    _import_structure["modeling_bloom"] = [
        """BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BloomForCausalLM""",
        """BloomModel""",
        """BloomPreTrainedModel""",
        """BloomForSequenceClassification""",
        """BloomForTokenClassification""",
        """BloomForQuestionAnswering""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Fix: the lazy module must replace this module in sys.modules; previously
    # it was assigned to a local name, leaving the package eagerly empty.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration class for Transformer-XL models.

    NOTE(review): the constructor signature repeats the parameter name
    ``lowercase_`` dozens of times (a SyntaxError), and every assignment in its
    body is collapsed onto one throwaway name — they were presumably
    ``self.<param> = <param>`` originally (e.g. ``self.vocab_size = vocab_size``).
    The ``@max_position_embeddings.setter`` decorator below also references a
    property that no longer exists under that name. Confirm against upstream
    before relying on this class.
    """

    __UpperCamelCase = """transfo-xl"""
    __UpperCamelCase = ["""mems"""]
    __UpperCamelCase = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self :List[Any] , lowercase_ :Optional[int]=26_77_35 , lowercase_ :Union[str, Any]=[2_00_00, 4_00_00, 20_00_00] , lowercase_ :List[Any]=10_24 , lowercase_ :Optional[Any]=10_24 , lowercase_ :Tuple=16 , lowercase_ :Tuple=64 , lowercase_ :Any=40_96 , lowercase_ :int=4 , lowercase_ :List[str]=False , lowercase_ :Union[str, Any]=18 , lowercase_ :Optional[Any]=16_00 , lowercase_ :Dict=10_00 , lowercase_ :Optional[int]=True , lowercase_ :Tuple=True , lowercase_ :Dict=0 , lowercase_ :Tuple=-1 , lowercase_ :Optional[int]=True , lowercase_ :Optional[int]=0.1 , lowercase_ :str=0.0 , lowercase_ :List[str]=True , lowercase_ :int="normal" , lowercase_ :Dict=0.01 , lowercase_ :Optional[Any]=0.01 , lowercase_ :Dict=0.02 , lowercase_ :Tuple=1E-5 , lowercase_ :str=0 , **lowercase_ :Tuple , ) -> List[str]:
        UpperCAmelCase = vocab_size
        UpperCAmelCase = []
        self.cutoffs.extend(lowercase_ )
        # When sharing projections, the first cutoff bucket is excluded.
        if proj_share_all_but_first:
            UpperCAmelCase = [False] + [True] * len(self.cutoffs )
        else:
            UpperCAmelCase = [False] + [False] * len(self.cutoffs )
        UpperCAmelCase = d_model
        UpperCAmelCase = d_embed
        UpperCAmelCase = d_head
        UpperCAmelCase = d_inner
        UpperCAmelCase = div_val
        UpperCAmelCase = pre_lnorm
        UpperCAmelCase = n_layer
        UpperCAmelCase = n_head
        UpperCAmelCase = mem_len
        UpperCAmelCase = same_length
        UpperCAmelCase = attn_type
        UpperCAmelCase = clamp_len
        UpperCAmelCase = sample_softmax
        UpperCAmelCase = adaptive
        UpperCAmelCase = dropout
        UpperCAmelCase = dropatt
        UpperCAmelCase = untie_r
        UpperCAmelCase = init
        UpperCAmelCase = init_range
        UpperCAmelCase = proj_init_std
        UpperCAmelCase = init_std
        UpperCAmelCase = layer_norm_epsilon
        super().__init__(eos_token_id=lowercase_ , **lowercase_ )
    @property
    def UpperCAmelCase__ ( self :Union[str, Any] ) -> Any:
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any ) -> Tuple:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 78 | 1 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = [False] * len(lowercase_ )
UpperCAmelCase = [-1] * len(lowercase_ )
def dfs(lowercase_ , lowercase_ ):
UpperCAmelCase = True
UpperCAmelCase = c
for u in graph[v]:
if not visited[u]:
dfs(lowercase_ , 1 - c )
for i in range(len(lowercase_ ) ):
if not visited[i]:
dfs(lowercase_ , 0 )
for i in range(len(lowercase_ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
# NOTE(review): the graph is bound to `snake_case_` but the call below reads
# `graph`, and `check_bipartite_dfs` is not defined under that name in this
# module (the checker above is `_lowerCAmelCase`) — as written this raises
# NameError. TODO confirm the intended names.
snake_case_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 78 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowerCAmelCase ( lowercase_ = "isbn/0140328726" ):
UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
UpperCAmelCase = F"""{olid} is not a valid Open Library olid"""
raise ValueError(lowercase_ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def _lowerCAmelCase ( lowercase_ ):
    """Reduce a raw Open Library book record to a small human-readable summary.

    Args:
        lowercase_: the JSON dict returned by the Open Library books endpoint.

    Returns:
        Dict keyed by display labels, with author keys resolved to names, the
        first sentence flattened to text, and list values joined with commas.
    """
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    # Fix: the parameter was referenced via the undefined name `ol_book_data`
    # and every intermediate result was collapsed onto one reused name.
    data = {better_key: lowercase_[key] for key, better_key in desired_keys.items()}
    # NOTE(review): `get_openlibrary_data` is not defined under that name in
    # this module (the fetch helper above is `_lowerCAmelCase`) — confirm the
    # intended callee.
    data['Authors'] = [
        get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ', '.join(value )
    return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive loop: look up ISBNs until the user quits.
    # NOTE(review): each input/result is bound to `snake_case_` but the body
    # reads `isbn` / `book_summary`, and `summarize_book` /
    # `get_openlibrary_data` are not defined under those names in this module
    # (both helpers above are `_lowerCAmelCase`) — as written this raises
    # NameError. TODO confirm the intended names.
    while True:
        snake_case_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            snake_case_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
            print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 78 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
snake_case_ = logging.get_logger(__name__)
snake_case_ = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
snake_case_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _lowerCAmelCase ( lowercase_ ):
    """Resolve an image-processor class object from its class name.

    Looks the name up in the known mapping, then in dynamically registered
    extra content, and finally on the top-level ``transformers`` module (so a
    missing backend resolves to its dummy placeholder).

    Args:
        lowercase_: the image-processor class name, e.g. ``"CLIPImageProcessor"``.

    Returns:
        The class object, or None if it cannot be found.

    NOTE(review): ``IMAGE_PROCESSOR_MAPPING_NAMES`` and
    ``IMAGE_PROCESSOR_MAPPING`` are bound above under the name ``snake_case_``
    in this module — confirm the constant names before shipping.
    """
    # Fix: the body mixed the undefined name `class_name` with the parameter
    # and collapsed distinct locals into one reused name.
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if lowercase_ in extractors:
            inner_module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F""".{inner_module_name}""" , 'transformers.models' )
            try:
                return getattr(module , lowercase_ )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == lowercase_:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , lowercase_ ):
        return getattr(main_module , lowercase_ )
    return None
def _lowerCAmelCase ( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **lowercase_ , ):
    """Load the image-processor configuration dict for a model repo or local path.

    Args:
        pretrained_model_name_or_path: repo id or local directory.
        cache_dir / force_download / resume_download / proxies / use_auth_token /
        revision / local_files_only: forwarded to ``get_file_from_repo``.

    Returns:
        The parsed JSON config dict, or ``{}`` if no image-processor config
        file can be located.
    """
    # Fix: every parameter was previously named `lowercase_` (a SyntaxError) and
    # the resolved path was bound to a throwaway name.
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
class A_ :
    """Factory that instantiates the correct image-processor class for a checkpoint.

    Not meant to be constructed directly — use the ``from_pretrained`` factory.
    NOTE(review): the factory method below is named ``UpperCAmelCase__`` and its
    decorator argument / several locals reference undefined names
    (``lowercase_``, ``kwargs``, ``config_dict`` vs their collapsed bindings);
    the module constants it reads (``IMAGE_PROCESSOR_MAPPING`` etc.) are bound
    above under ``snake_case_``. Confirm against upstream before relying on it.
    """
    def __init__( self :int ) -> str:
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
    @classmethod
    @replace_list_option_in_docstrings(lowercase_ )
    def UpperCAmelCase__ ( cls :str , lowercase_ :Dict , **lowercase_ :Any ) -> str:
        # Pull factory-only kwargs off before delegating to the real class.
        UpperCAmelCase = kwargs.pop('config' , lowercase_ )
        UpperCAmelCase = kwargs.pop('trust_remote_code' , lowercase_ )
        UpperCAmelCase = True
        UpperCAmelCase , UpperCAmelCase = ImageProcessingMixin.get_image_processor_dict(lowercase_ , **lowercase_ )
        UpperCAmelCase = config_dict.get('image_processor_type' , lowercase_ )
        UpperCAmelCase = None
        if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
            UpperCAmelCase = config_dict['auto_map']['AutoImageProcessor']
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            UpperCAmelCase = config_dict.pop('feature_extractor_type' , lowercase_ )
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.' )
                UpperCAmelCase = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
            if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
                UpperCAmelCase = config_dict['auto_map']['AutoFeatureExtractor']
                UpperCAmelCase = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.' )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(lowercase_ , lowercase_ ):
                UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ , **lowercase_ )
            # It could be in `config.image_processor_type``
            UpperCAmelCase = getattr(lowercase_ , 'image_processor_type' , lowercase_ )
            if hasattr(lowercase_ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
                UpperCAmelCase = config.auto_map['AutoImageProcessor']
        if image_processor_class is not None:
            UpperCAmelCase = image_processor_class_from_name(lowercase_ )
        UpperCAmelCase = image_processor_auto_map is not None
        UpperCAmelCase = image_processor_class is not None or type(lowercase_ ) in IMAGE_PROCESSOR_MAPPING
        UpperCAmelCase = resolve_trust_remote_code(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        if has_remote_code and trust_remote_code:
            # Remote code path: fetch the class from the Hub-hosted module.
            UpperCAmelCase = get_class_from_dynamic_module(
                lowercase_ , lowercase_ , **lowercase_ )
            UpperCAmelCase = kwargs.pop('code_revision' , lowercase_ )
            if os.path.isdir(lowercase_ ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(lowercase_ , **lowercase_ )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(lowercase_ , **lowercase_ )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(lowercase_ ) in IMAGE_PROCESSOR_MAPPING:
            UpperCAmelCase = IMAGE_PROCESSOR_MAPPING[type(lowercase_ )]
            return image_processor_class.from_dict(lowercase_ , **lowercase_ )
        raise ValueError(
            f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def UpperCAmelCase__ ( lowercase_ :Optional[Any] , lowercase_ :Any ) -> Any:
        # Register a custom (config class, image-processor class) pair.
        IMAGE_PROCESSOR_MAPPING.register(lowercase_ , lowercase_ )
| 78 |
"""simple docstring"""
class A_ :
"""simple docstring"""
def __init__( self :List[str] , lowercase_ :int , lowercase_ :Optional[int]=None , lowercase_ :List[str]=None ) -> str:
UpperCAmelCase = data
UpperCAmelCase = previous
UpperCAmelCase = next_node
def __str__( self :Optional[Any] ) -> str:
return f"""{self.data}"""
def UpperCAmelCase__ ( self :int ) -> int:
return self.data
def UpperCAmelCase__ ( self :List[str] ) -> Any:
return self.next
def UpperCAmelCase__ ( self :Tuple ) -> Optional[int]:
return self.previous
class A_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowercase_ :Optional[Any] ) -> str:
UpperCAmelCase = head
def __iter__( self :List[str] ) -> List[str]:
return self
def UpperCAmelCase__ ( self :int ) -> Any:
if not self.current:
raise StopIteration
else:
UpperCAmelCase = self.current.get_data()
UpperCAmelCase = self.current.get_next()
return value
class A_ :
"""simple docstring"""
def __init__( self :Union[str, Any] ) -> List[Any]:
UpperCAmelCase = None # First node in list
UpperCAmelCase = None # Last node in list
def __str__( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase = self.head
UpperCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
UpperCAmelCase = current.get_next()
return " ".join(str(lowercase_ ) for node in nodes )
def __contains__( self :str , lowercase_ :int ) -> str:
UpperCAmelCase = self.head
while current:
if current.get_data() == value:
return True
UpperCAmelCase = current.get_next()
return False
def __iter__( self :Tuple ) -> Dict:
return LinkedListIterator(self.head )
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
if self.head:
return self.head.get_data()
return None
def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]:
if self.tail:
return self.tail.get_data()
return None
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node ) -> None:
if self.head is None:
UpperCAmelCase = node
UpperCAmelCase = node
else:
self.insert_before_node(self.head , lowercase_ )
def UpperCAmelCase__ ( self :Any , lowercase_ :Node ) -> None:
if self.head is None:
self.set_head(lowercase_ )
else:
self.insert_after_node(self.tail , lowercase_ )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :int ) -> None:
UpperCAmelCase = Node(lowercase_ )
if self.head is None:
self.set_head(lowercase_ )
else:
self.set_tail(lowercase_ )
def UpperCAmelCase__ ( self :int , lowercase_ :Node , lowercase_ :Node ) -> None:
UpperCAmelCase = node
UpperCAmelCase = node.previous
if node.get_previous() is None:
UpperCAmelCase = node_to_insert
else:
UpperCAmelCase = node_to_insert
UpperCAmelCase = node_to_insert
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node , lowercase_ :Node ) -> None:
UpperCAmelCase = node
UpperCAmelCase = node.next
if node.get_next() is None:
UpperCAmelCase = node_to_insert
else:
UpperCAmelCase = node_to_insert
UpperCAmelCase = node_to_insert
def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None:
UpperCAmelCase = 1
UpperCAmelCase = Node(lowercase_ )
UpperCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase_ , lowercase_ )
return
current_position += 1
UpperCAmelCase = node.next
self.insert_after_node(self.tail , lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int ) -> Node:
UpperCAmelCase = self.head
while node:
if node.get_data() == item:
return node
UpperCAmelCase = node.get_next()
raise Exception('Node not found' )
def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[Any] ) -> Dict:
if (node := self.get_node(lowercase_ )) is not None:
if node == self.head:
UpperCAmelCase = self.head.get_next()
if node == self.tail:
UpperCAmelCase = self.tail.get_previous()
self.remove_node_pointers(lowercase_ )
@staticmethod
def UpperCAmelCase__ ( lowercase_ :Node ) -> None:
if node.get_next():
UpperCAmelCase = node.previous
if node.get_previous():
UpperCAmelCase = node.next
UpperCAmelCase = None
UpperCAmelCase = None
def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]:
return self.head is None
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
    import doctest

    # Run any doctests in this module when executed as a script.
    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
snake_case_ = logging.getLogger(__name__)
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Distributed RAG retriever: the main worker (rank 0) owns the index and
    serves retrieval requests from the other workers over a dedicated ``gloo``
    process group (``nccl`` lacks gather/scatter; plain gloo is too slow for
    the core GPU traffic).

    NOTE(review): obfuscation replaced most assignment targets with the local
    ``UpperCAmelCase`` (originally attributes such as ``self.process_group``)
    and left several names unbound (``distributed_port``, ``addrs``,
    ``question_hidden_states``, ``n_docs`` ...). The code below is preserved
    byte-for-byte; behavior claims are from structure only — verify against
    the unobfuscated original before relying on it.
    """

    # NOTE(review): the four `lowercase_` parameters share one name — this
    # `def` is a SyntaxError as written (originally retriever config,
    # question-encoder tokenizer, generator tokenizer, index).
    def __init__( self :Union[str, Any] , lowercase_ :str , lowercase_ :Dict , lowercase_ :str , lowercase_ :List[Any]=None ) -> Union[str, Any]:
        super().__init__(
            lowercase_ , question_encoder_tokenizer=lowercase_ , generator_tokenizer=lowercase_ , index=lowercase_ , init_retrieval=lowercase_ , )
        # NOTE(review): originally `self.process_group = None`.
        UpperCAmelCase = None

    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :int ) -> List[str]:
        """Initialize retrieval: build a gloo group and init the index on the main worker."""
        logger.info('initializing retrieval' )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('dist initialized' )
            # needs to be set manually
            UpperCAmelCase = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            # NOTE(review): originally wrote os.environ["MASTER_PORT"]; target lost.
            UpperCAmelCase = str(distributed_port + 1 )
            UpperCAmelCase = dist.new_group(ranks=lowercase_ , backend='gloo' )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('dist not initialized / main' )
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )

    def UpperCAmelCase__ ( self :Tuple ) -> List[str]:
        """True iff this process is rank 0 of the retrieval process group."""
        return dist.get_rank(group=self.process_group ) == 0

    def UpperCAmelCase__ ( self :int , lowercase_ :int , lowercase_ :str , lowercase_ :Dict=torch.floataa ) -> Tuple:
        """Receive a tensor scattered from rank 0 into a freshly allocated buffer."""
        UpperCAmelCase = torch.empty(lowercase_ , dtype=lowercase_ )
        dist.scatter(lowercase_ , src=0 , scatter_list=lowercase_ , group=self.process_group )
        return target_tensor

    def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
        """Guess the socket interface name ("e..." prefix, e.g. eth0) for gloo."""
        UpperCAmelCase = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        UpperCAmelCase = next((addr for addr in addrs if addr.startswith('e' )) , lowercase_ )
        return ifname

    def UpperCAmelCase__ ( self :Any , lowercase_ :np.ndarray , lowercase_ :int ) -> Tuple[np.ndarray, List[dict]]:
        """Retrieve docs for query embeddings; gathers to rank 0 and scatters results back."""
        # single GPU training
        if not dist.is_initialized():
            UpperCAmelCase , UpperCAmelCase = self._main_retrieve(lowercase_ , lowercase_ )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase_ )
        # distributed training
        UpperCAmelCase = dist.get_world_size(group=self.process_group )
        # gather logic
        UpperCAmelCase = None
        if self._is_main():
            UpperCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(lowercase_ )]
        dist.gather(torch.tensor(lowercase_ ) , dst=0 , gather_list=lowercase_ , group=self.process_group )
        # scatter logic
        UpperCAmelCase = question_hidden_states.shape[0]
        UpperCAmelCase = []
        UpperCAmelCase = []
        if self._is_main():
            assert len(lowercase_ ) == world_size
            UpperCAmelCase , UpperCAmelCase = self._main_retrieve(torch.cat(lowercase_ ).numpy() , lowercase_ )
            UpperCAmelCase , UpperCAmelCase = torch.tensor(lowercase_ ), torch.tensor(lowercase_ )
            UpperCAmelCase = self._chunk_tensor(lowercase_ , lowercase_ )
            UpperCAmelCase = self._chunk_tensor(lowercase_ , lowercase_ )
        UpperCAmelCase = self._scattered(lowercase_ , [n_queries, n_docs] , target_type=torch.intaa )
        UpperCAmelCase = self._scattered(lowercase_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowercase_ )
| 78 |
"""simple docstring"""
class A_ :
    """Fenwick-style tree for point updates and range *maximum* queries.

    NOTE(review): this class is non-functional as written — obfuscation
    destroyed it in three ways, flagged inline: (1) every method shares the
    name ``UpperCAmelCase__``, so later defs shadow earlier ones; (2) the
    ``__init__`` attribute writes (originally ``self.size``, ``self.arr``,
    ``self.tree``) bind a local instead; (3) many body names (``index``,
    ``value``, ``left``, ``result``...) are unbound because the parameters
    were renamed to ``lowercase_``. Preserved byte-for-byte; do not call.
    """

    def __init__( self :List[Any] , lowercase_ :int ) -> None:
        # NOTE(review): originally self.size / self.arr / self.tree;
        # `size` is also unbound (parameter renamed to lowercase_).
        UpperCAmelCase = size
        UpperCAmelCase = [0] * size
        UpperCAmelCase = [0] * size

    # Next index covering this one: set the lowest unset bit (index | index+1).
    @staticmethod
    def UpperCAmelCase__ ( lowercase_ :int ) -> int:
        return index | (index + 1)

    # Previous boundary: clear trailing set bits, minus one.
    @staticmethod
    def UpperCAmelCase__ ( lowercase_ :int ) -> int:
        return (index & (index + 1)) - 1

    # Point update: write `value` at `index`, then repair all covering nodes.
    def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None:
        UpperCAmelCase = value
        while index < self.size:
            UpperCAmelCase = self.get_prev(lowercase_ ) + 1
            if current_left_border == index:
                UpperCAmelCase = value
            else:
                UpperCAmelCase = max(lowercase_ , lowercase_ , lowercase_ )
            UpperCAmelCase = self.get_next(lowercase_ )

    # Range-max query over [left, right) — right is exclusive.
    def UpperCAmelCase__ ( self :List[str] , lowercase_ :int , lowercase_ :int ) -> int:
        right -= 1 # Because of right is exclusive
        UpperCAmelCase = 0
        while left <= right:
            UpperCAmelCase = self.get_prev(lowercase_ )
            if left <= current_left:
                # Whole tree node fits inside the range: take it and jump left.
                UpperCAmelCase = max(lowercase_ , self.tree[right] )
                UpperCAmelCase = current_left
            else:
                # Partial overlap: fall back to the raw array element.
                UpperCAmelCase = max(lowercase_ , self.arr[right] )
                right -= 1
        return result
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# Module-level constants (diffusers-style path/filename defaults).
# NOTE(review): obfuscation collapsed all the distinct constant names into
# `snake_case_`, so each assignment below overwrites the previous one, and
# `default_cache_path` is unbound (it was originally one of these constants).
snake_case_ = HUGGINGFACE_HUB_CACHE  # default cache root from huggingface_hub
snake_case_ = """config.json"""  # model config filename
snake_case_ = """diffusion_pytorch_model.bin"""  # PyTorch weights filename
snake_case_ = """diffusion_flax_model.msgpack"""  # Flax weights filename
snake_case_ = """model.onnx"""  # ONNX export filename
snake_case_ = """diffusion_pytorch_model.safetensors"""  # safetensors weights filename
snake_case_ = """weights.pb"""  # TF protobuf weights filename
snake_case_ = """https://huggingface.co"""  # Hub base URL
snake_case_ = default_cache_path  # NOTE(review): unbound name — see note above
snake_case_ = """diffusers_modules"""  # dynamic-modules subfolder name
snake_case_ = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
snake_case_ = ["""fp16""", """non-ema"""]  # recognized weight-variant suffixes
snake_case_ = """.self_attn"""  # attention submodule suffix
| 78 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library:
    NMT+NFKC+lowercase normalization, Metaspace/digits/punctuation
    pre-tokenization, and an EOS-appending post-processor.

    NOTE(review): obfuscation replaced the attribute targets (originally
    ``self.special_tokens``, ``self.special_tokens_list``, ``tokenizer`` and
    its ``.normalizer``/``.pre_tokenizer``/``.decoder``/``.post_processor``)
    with the local ``UpperCAmelCase``, and the ``__init__`` parameters all
    share one name (a SyntaxError). Code preserved byte-for-byte.
    """

    # NOTE(review): duplicate `lowercase_` parameters — originally
    # (replacement="▁", add_prefix_space=True, unk_token, eos_token, pad_token).
    def __init__( self :Dict , lowercase_ :str = "▁" , lowercase_ :bool = True , lowercase_ :Union[str, AddedToken] = "<unk>" , lowercase_ :Union[str, AddedToken] = "</s>" , lowercase_ :Union[str, AddedToken] = "<pad>" , ) -> str:
        # Fixed ids for the special tokens (pad=0, eos=1, unk=2).
        UpperCAmelCase = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        UpperCAmelCase = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            UpperCAmelCase = token_dict['token']
        UpperCAmelCase = Tokenizer(Unigram() )
        UpperCAmelCase = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}' ) , ' ' ),
                normalizers.Lowercase(),
            ] )
        UpperCAmelCase = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ),
                pre_tokenizers.Digits(individual_digits=lowercase_ ),
                pre_tokenizers.Punctuation(),
            ] )
        UpperCAmelCase = decoders.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ )
        # Post-processor appends EOS after every sequence.
        UpperCAmelCase = TemplateProcessing(
            single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
        UpperCAmelCase = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(lowercase_ , lowercase_ )

    # Train the Unigram model from a list of files.
    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Union[str, List[str]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Union[str, Any]:
        UpperCAmelCase = trainers.UnigramTrainer(
            vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , )
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = [files]
        self._tokenizer.train(lowercase_ , trainer=lowercase_ )
        self.add_unk_id()

    # Train the Unigram model from an iterator of texts.
    def UpperCAmelCase__ ( self :str , lowercase_ :Union[Iterator[str], Iterator[Iterator[str]]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Tuple:
        UpperCAmelCase = trainers.UnigramTrainer(
            vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , )
        self._tokenizer.train_from_iterator(lowercase_ , trainer=lowercase_ )
        self.add_unk_id()

    # Re-serialize the tokenizer with the unk id patched into the model JSON.
    def UpperCAmelCase__ ( self :Union[str, Any] ) -> int:
        UpperCAmelCase = json.loads(self._tokenizer.to_str() )
        UpperCAmelCase = self.special_tokens['unk']['id']
        UpperCAmelCase = Tokenizer.from_str(json.dumps(lowercase_ ) )
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _lowerCAmelCase ( lowercase_ = 8 ):
UpperCAmelCase = ascii_letters + digits + punctuation
return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) )
def _lowerCAmelCase(chars_incl, i):
    """Build a password of total length ``i`` that is guaranteed to contain
    the caller-supplied characters ``chars_incl``, padded with random
    letters, digits and punctuation, then shuffled.

    Fixes the obfuscated original's duplicate parameter names (a
    SyntaxError) and restores the ``quotient``/``remainder`` locals that
    were collapsed into a throw-away name.
    """
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Budget left after the mandatory characters.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # NOTE(review): `random` here is the "i chars from pool" helper defined
    # just below in this module, not the stdlib module.
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
pass # Put your code here...
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
pass # Put your code here...
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
pass # Put your code here...
def _lowerCAmelCase ( lowercase_ , lowercase_ = 8 ):
if len(lowercase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCAmelCase = any(char in ascii_uppercase for char in password )
UpperCAmelCase = any(char in ascii_lowercase for char in password )
UpperCAmelCase = any(char in digits for char in password )
UpperCAmelCase = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def _lowerCAmelCase():
    """Interactive entry point: prompt for a length and required characters,
    then print a generated password.

    NOTE(review): `password_generator` and `alternative_password_generator`
    are not defined under those names in this module after obfuscation
    (the helpers above were all renamed to `_lowerCAmelCase`), so these
    calls will raise NameError as written.
    """
    UpperCAmelCase = int(input('Please indicate the max length of your password: ' ).strip() )
    UpperCAmelCase = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' , password_generator(lowercase_ ) )
    print(
        'Alternative Password generated:' , alternative_password_generator(lowercase_ , lowercase_ ) , )
    print('[If you are thinking of using this passsword, You better save it.]' )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module after obfuscation —
    # the interactive entry point above is `_lowerCAmelCase`.
    main()
| 78 | 1 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Tokenization tests for RoFormer (Chinese): checks that the slow and
    fast tokenizers segment a Chinese sentence identically and map tokens to
    the expected ids. Requires the `rjieba` word segmenter."""

    __UpperCamelCase = RoFormerTokenizer
    __UpperCamelCase = RoFormerTokenizerFast
    __UpperCamelCase = True
    __UpperCamelCase = True

    def UpperCAmelCase__ ( self :Optional[Any] ) -> str:
        """Standard TokenizerTesterMixin setup; no extra fixtures needed."""
        super().setUp()

    # Factory for the slow (Python) tokenizer under test.
    def UpperCAmelCase__ ( self :int , **lowercase_ :List[str] ) -> List[Any]:
        return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **lowercase_ )

    # Factory for the fast (Rust-backed) tokenizer under test.
    def UpperCAmelCase__ ( self :List[Any] , **lowercase_ :Any ) -> Any:
        return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **lowercase_ )

    # Fixture: raw Chinese input and its expected space-separated segmentation.
    def UpperCAmelCase__ ( self :Tuple ) -> Optional[Any]:
        UpperCAmelCase = '永和服装饰品有限公司,今天天气非常好'
        UpperCAmelCase = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
        return input_text, output_text

    # Slow tokenizer: tokenization and token->id conversion match expectations.
    def UpperCAmelCase__ ( self :int ) -> Dict:
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase , UpperCAmelCase = self.get_chinese_input_output_texts()
        UpperCAmelCase = tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , output_text.split() )
        UpperCAmelCase = tokens + [tokenizer.unk_token]
        UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )

    # Fast tokenizer: same expectations as the slow path.
    def UpperCAmelCase__ ( self :List[str] ) -> int:
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase , UpperCAmelCase = self.get_chinese_input_output_texts()
        UpperCAmelCase = tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , output_text.split() )
        UpperCAmelCase = tokens + [tokenizer.unk_token]
        UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )

    # The next three mixin tests are intentionally disabled for RoFormer.
    def UpperCAmelCase__ ( self :Optional[Any] ) -> str:
        pass

    def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
        pass

    def UpperCAmelCase__ ( self :Tuple ) -> Dict:
        pass
| 78 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    """Shared test mixin for DeepFloyd-IF pipelines: builds tiny dummy
    components and checks save/load round-trips reproduce outputs.

    NOTE(review): obfuscation replaced local assignment targets with
    ``UpperCAmelCase`` (originally names like ``text_encoder``, ``unet``,
    ``inputs[...]`` writes), so several statements no longer bind the names
    the later code reads. Code preserved byte-for-byte; treat behavior
    descriptions as intent, not as verified runtime behavior.
    """

    # Tiny stage-1 (text-to-image) components with fixed seeds for determinism.
    def UpperCAmelCase__ ( self :Any ) -> List[str]:
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    # Tiny super-resolution-stage components (6-channel UNet + image-noising scheduler).
    def UpperCAmelCase__ ( self :List[Any] ) -> Any:
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    # Save/load round-trip with all optional components set to None:
    # pre-encodes the prompt so the reloaded pipeline needs no text encoder.
    def UpperCAmelCase__ ( self :List[str] ) -> str:
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['prompt']
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        if "image" in inputs:
            UpperCAmelCase = inputs['image']
        else:
            UpperCAmelCase = None
        if "mask_image" in inputs:
            UpperCAmelCase = inputs['mask_image']
        else:
            UpperCAmelCase = None
        if "original_image" in inputs:
            UpperCAmelCase = inputs['original_image']
        else:
            UpperCAmelCase = None
        UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(lowercase_ )
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )

    # Plain save/load round-trip: reloaded pipeline must reproduce outputs
    # within 1e-4 max absolute difference.
    def UpperCAmelCase__ ( self :List[Any] ) -> str:
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )
| 78 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
snake_case_ = logging.get_logger(__name__)
def _lowerCAmelCase ( lowercase_ ):
if isinstance(lowercase_ , np.ndarray ):
return list(tensor.shape )
UpperCAmelCase = tf.shape(lowercase_ )
if tensor.shape == tf.TensorShape(lowercase_ ):
return dynamic
UpperCAmelCase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(lowercase_ )]
def _lowerCAmelCase(logits, axis=None, name=None):
    """XLA-safe softmax: adds a tiny epsilon to the logits before
    ``tf.nn.softmax`` (works around empty-input XLA compilation issues;
    the epsilon is far below float precision, so results are unchanged).

    Fixes the obfuscated original's three identically-named parameters
    (a SyntaxError).
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def _lowerCAmelCase(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Functional layer norm duplicating ``torch.nn.functional.layer_norm``
    for porting models to TF. Only 1-D weight/bias and a single int axis are
    supported.

    Fixes the obfuscated original's duplicate parameter names (a
    SyntaxError) and restores the destroyed locals per the body's own
    variable references.
    """
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def _lowerCAmelCase(input, start_dim=0, end_dim=-1):
    """Replicate ``torch.flatten`` in TF: collapse dims ``start_dim`` through
    ``end_dim`` (inclusive) into a single dimension.

    Fixes the obfuscated original's duplicate parameter names (a
    SyntaxError); the body already used ``input``/``start_dim``/``end_dim``.
    """
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    # Nothing to collapse: single dimension selected.
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def _lowerCAmelCase(encoder_attention_mask):
    """Invert a 2-D or 3-D encoder attention mask into an additive bias:
    attended positions become 0 and masked positions the dtype minimum.

    Fixes the obfuscated original, whose body referenced the unbound name
    ``encoder_attention_mask`` (the parameter had been renamed) and
    discarded the broadcast intermediates into a throw-away local.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    # Broadcast the mask to [batch, num_heads, seq, seq]-compatible rank.
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def _lowerCAmelCase(tensor, embed_dim, tensor_name="input_ids"):
    """Assert every id in ``tensor`` is below ``embed_dim`` so an embedding
    lookup cannot go out of bounds; raises with a tokenization hint.

    Fixes the obfuscated original's duplicate parameter names (a
    SyntaxError); the body already used ``tensor``/``embed_dim``/``tensor_name``.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"""The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding """
            f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
        ),
    )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
UpperCAmelCase = [x for x in data if len(lowercase_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
F"""bytes: {bad_attributes}""" )
UpperCAmelCase = np.asarray(lowercase_ )
UpperCAmelCase = 1
UpperCAmelCase = np.array_split(lowercase_ , lowercase_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
UpperCAmelCase = np.array_split(lowercase_ , lowercase_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(lowercase_ ):
UpperCAmelCase = chunk_data
else:
UpperCAmelCase = data
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
if name in group.attrs:
UpperCAmelCase = [n.decode('utf8' ) if hasattr(lowercase_ , 'decode' ) else n for n in group.attrs[name]]
else:
UpperCAmelCase = []
UpperCAmelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(lowercase_ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def _lowerCAmelCase(data):
    """Expand every 1-D ``tf.Tensor`` in the (possibly nested) structure
    ``data`` with a trailing axis; leave everything else untouched.

    Fixes the obfuscated original's inner predicate, which tested
    ``isinstance(lowercase_, ...)`` (the outer argument) instead of the
    element ``t`` being mapped.
    """
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 78 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Module logger (pipeline-style).
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
# Usage example injected into the pipeline __call__ docstring via
# @replace_example_docstring below.
snake_case_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=8 ):
UpperCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _lowerCAmelCase(pil_image, w=512, h=512):
    """Resize a PIL image to (w, h) and convert it to a [-1, 1] float32
    torch tensor of shape (1, 3, h, w).

    Fixes the obfuscated original's duplicate parameter names (a
    SyntaxError) and the destroyed intermediate locals.
    """
    resized = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(resized.convert('RGB'))
    # Map uint8 [0, 255] to float [-1, 1].
    arr = arr.astype(np.float32) / 127.5 - 1
    # HWC -> CHW, then add the batch dimension.
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
    # NOTE(review): duplicate `lowercase_` parameters (originally unet,
    # scheduler, movq) — a SyntaxError as written; and the final assignment
    # originally bound self.movq_scale_factor, not a local.
    def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]:
        """Register the UNet, scheduler and MoVQ VAE; derive the VAE scale factor."""
        super().__init__()
        self.register_modules(
            unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
        UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]:
        """Trim the scheduler's timesteps for img2img: skip the first
        (1 - strength) fraction of denoising steps.

        NOTE(review): obfuscation left `num_inference_steps`, `strength`,
        `init_timestep`, `t_start` and `timesteps` unbound (parameters were
        renamed and assignment targets collapsed into `UpperCAmelCase`);
        preserved byte-for-byte.
        """
        # get the original timestep using init_timestep
        UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ )
        UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
        UpperCAmelCase = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any:
        """Encode the input image into MoVQ latents (unless already 4-channel
        latents), then noise them to the starting timestep for img2img.

        NOTE(review): duplicate `lowercase_` parameters (originally image,
        timestep, batch_size, num_images_per_prompt, dtype, device,
        generator) and destroyed assignment targets throughout; preserved
        byte-for-byte.
        """
        if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
        UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ )
        UpperCAmelCase = batch_size * num_images_per_prompt
        # A 4-channel input is already a latent; skip the VAE encode.
        if image.shape[1] == 4:
            UpperCAmelCase = image
        else:
            if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(lowercase_ , lowercase_ ):
                # Per-sample generators: encode each image slice with its own RNG.
                UpperCAmelCase = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
                ]
                UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
            else:
                UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
            UpperCAmelCase = self.movq.config.scaling_factor * init_latents
            UpperCAmelCase = torch.cat([init_latents] , dim=0 )
        UpperCAmelCase = init_latents.shape
        UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
        # get latents
        UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = init_latents
        return latents
    def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]:
        """Offload the UNet and MoVQ to CPU via accelerate's `cpu_offload`,
        moving submodules to GPU `gpu_id` only while they execute.

        NOTE(review): the `torch.device` result was originally bound to a
        `device` local (target destroyed), and `gpu_id` is unbound (the
        parameter was renamed to `lowercase_`).
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
        UpperCAmelCase = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(lowercase_ , lowercase_ )
    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict:
        """Offload whole models to CPU with `cpu_offload_with_hook` (faster
        than sequential offload; models move to GPU on forward and back when
        the next model runs). Requires accelerate >= 0.17.0.

        NOTE(review): hook/device assignment targets destroyed by
        obfuscation and `gpu_id` is unbound; preserved byte-for-byte.
        """
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=lowercase_ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        UpperCAmelCase = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
        # We'll offload the last model manually.
        UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__(self):
    """Return the device model weights execute on.

    With accelerate hooks installed, weights may live on CPU while execution
    happens on the hook's `execution_device`; this walks the unet's modules to
    find that device, falling back to `self.device`.
    """
    if not hasattr(self.unet, '_hf_hook'):
        return self.device
    for module in self.unet.modules():
        # BUG FIX: the obfuscated original tested `hasattr(lowercase_, ...)`,
        # an undefined name; the loop variable `module` is the intended operand.
        if (
            hasattr(module, '_hf_hook')
            and hasattr(module._hf_hook, 'execution_device')
            and module._hf_hook.execution_device is not None
        ):
            return torch.device(module._hf_hook.execution_device)
    return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]:
    # Image-to-image denoising entry point (Kandinsky-style pipeline): encodes the
    # input image into movq latents, noises them to a strength-dependent timestep,
    # runs the UNet denoising loop with classifier-free guidance, then decodes.
    #
    # NOTE(review): identifiers were machine-mangled. Every parameter is named
    # `lowercase_` (a duplicate-argument SyntaxError as written) and the body reads
    # names such as `guidance_scale`, `image_embeds`, `image`, `batch_size`,
    # `num_images_per_prompt`, `latents`, `timesteps`, `height`, `width`,
    # `noise_pred` that are never bound here. The control flow is kept byte-for-byte;
    # only comments are added. Restore the original parameter names before use.
    UpperCAmelCase = self._execution_device
    UpperCAmelCase = guidance_scale > 1.0  # CFG is active only above scale 1.0
    # Concatenate list-valued embeddings into a single batch tensor.
    if isinstance(lowercase_ , lowercase_ ):  # NOTE(review): both args mangled — always True as written
        UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
        UpperCAmelCase = image_embeds.shape[0]
    if isinstance(lowercase_ , lowercase_ ):
        UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
    if do_classifier_free_guidance:
        # Duplicate embeddings per requested image and stack [negative, positive]
        # so one UNet pass serves both CFG branches.
        UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 )
        UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
        UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
    # Normalize the image argument to a list and validate element types.
    if not isinstance(lowercase_ , lowercase_ ):
        UpperCAmelCase = [image]
    if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
        raise ValueError(
            f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
    # Preprocess, encode to movq latents and expand per prompt image count.
    UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
    UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ )
    UpperCAmelCase = self.movq.encode(lowercase_ )['latents']
    UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 )
    # Strength-truncated schedule: start denoising from a partially noised latent.
    self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
    UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
    UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
    UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
    UpperCAmelCase = self.prepare_latents(
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
    for i, t in enumerate(self.progress_bar(lowercase_ ) ):
        # expand the latents if we are doing classifier free guidance
        UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
        UpperCAmelCase = {'image_embeds': image_embeds}
        UpperCAmelCase = self.unet(
            sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
        if do_classifier_free_guidance:
            # Split off the learned-variance channels, blend the CFG branches,
            # then re-attach the (text-branch) variance prediction.
            UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
            UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
            UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
            UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
        if not (
            hasattr(self.scheduler.config , 'variance_type' )
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            # Scheduler does not consume variance channels — drop them.
            UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
        # compute the previous noisy sample x_t -> x_t-1
        UpperCAmelCase = self.scheduler.step(
            lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
    # post-processing: decode latents back to pixel space with movq.
    UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
    if output_type in ["np", "pil"]:
        # map [-1, 1] model range to [0, 1] then to HWC numpy
        UpperCAmelCase = image * 0.5 + 0.5
        UpperCAmelCase = image.clamp(0 , 1 )
        UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCAmelCase = self.numpy_to_pil(lowercase_ )
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=lowercase_ )
| 78 | 1 |
"""simple docstring"""
def infix_2_postfix(infix):
    """Convert an infix expression to postfix (RPN) using the shunting-yard method.

    Prints a step-by-step trace table (symbol, stack, postfix) as a side effect.

    Args:
        infix: expression string of single-character operands and the operators
            ``^ * / % + -`` with optional parentheses (no whitespace).

    Returns:
        The postfix expression as a string.
    """
    # BUG FIX: both converters were defined under the same mangled name
    # `_lowerCAmelCase` while being called as `infix_2_postfix`/`infix_2_prefix`
    # (NameError); the real names are restored. Additionally, the operator branch
    # looked up `priority[stack[-1]]` without excluding "(", raising KeyError for
    # any operator that followed an open parenthesis — a guard is added.
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep=' | ', )
    print('-' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
    print(
        ' '.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    """Convert an infix expression to prefix notation.

    Reverses the input, swaps parentheses, converts to postfix, then reverses
    the postfix result.

    Args:
        infix: expression string (see :func:`infix_2_postfix`).

    Returns:
        The prefix expression as a string.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ')'  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


# Backward-compatible alias: the obfuscated original bound both functions to
# this name, with the prefix converter (defined last) winning.
_lowerCAmelCase = infix_2_prefix
if __name__ == "__main__":
    # BUG FIX: the obfuscated original assigned the input to `snake_case_` but
    # then read the never-defined name `Infix` (NameError); one consistent
    # variable is used throughout.
    Infix = input("""\nEnter an Infix Equation = """)  # Input an Infix equation
    Infix = """""".join(Infix.split())  # Remove spaces from the input
    print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 78 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return a normalized escape-time 'distance' for point (x, y) of the Mandelbrot set.

    Iterates z -> z^2 + c (with c = x + iy, z tracked as a=Re, b=Im) until the
    orbit's squared magnitude exceeds 4 or `max_step` iterations pass.

    Args:
        x: real part of the tested complex number.
        y: imaginary part of the tested complex number.
        max_step: maximum number of iterations (must be >= 2; with max_step <= 1
            the final division is undefined, as in the original).

    Returns:
        A value in [0, 1]; 1.0 means the orbit never diverged (point is likely
        in the set).
    """
    # BUG FIX: the obfuscated def used three parameters all named `lowercase_`
    # (a duplicate-argument SyntaxError); real names are restored from the body
    # and the `get_distance(...)` call site.
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Map an escape-time distance to a black-and-white RGB tuple.

    Points that never diverged (distance == 1) are drawn black; everything
    else is white.
    """
    # BUG FIX: renamed from the mangled `_lowerCAmelCase`/`lowercase_` pair so
    # the `get_black_and_white_rgb(...)` call site in get_image resolves.
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Map an escape-time distance to a color-coded RGB tuple.

    Non-diverging points (distance == 1) are black; diverging points get a hue
    proportional to the distance via HSV -> RGB conversion.
    """
    # BUG FIX: renamed from the mangled `_lowerCAmelCase`/`lowercase_` pair so
    # the `get_color_coded_rgb(...)` call site in get_image resolves.
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a PIL image.

    Args:
        image_width / image_height: output resolution in pixels.
        figure_center_x / figure_center_y: center of the viewed region in the
            complex plane.
        figure_width: width of the viewed region (height follows the image
            aspect ratio).
        max_step: iteration budget per pixel (see get_distance).
        use_distance_color_coding: colored rendering when True, black & white
            otherwise.

    Returns:
        A PIL ``Image`` object.
    """
    # BUG FIX: the obfuscated def repeated `lowercase_` for every parameter
    # (SyntaxError) and dropped the `pixels[image_x, image_y] = ...` assignment
    # target, so nothing was ever drawn; both are restored. The loop-invariant
    # figure_height computation is hoisted out of the pixel loops.
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # determine the figure height so the region keeps the image's aspect ratio
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


# Backward-compatible alias mirroring the obfuscated original, where the last
# `_lowerCAmelCase` definition (this function) won.
_lowerCAmelCase = get_image
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure
    # BUG FIX: the obfuscated original assigned the image to `snake_case_` but
    # then called `img.show()` on an undefined name; use `img` consistently.
    img = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 78 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class A_:
        """Stand-in used when PIL is unavailable so module-level references still import."""

        @staticmethod
        def UpperCAmelCase__(*args, **kwargs):
            # BUG FIX: the obfuscated original declared `*lowercase_, **lowercase_`
            # — a duplicate-argument SyntaxError; conventional names restored.
            # No-op placeholder: accepts anything, does nothing.
            pass
@is_pipeline_test
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    """Pipeline tests for the `visual-question-answering` task.

    NOTE(review): identifiers were machine-mangled. Every method is named
    `UpperCAmelCase__` (later defs shadow earlier ones in the class namespace),
    several signatures repeat the parameter name `lowercase_` (a
    duplicate-argument SyntaxError as written), and locals collapsed to
    `UpperCAmelCase`, leaving names like `vqa_pipeline`, `examples`, `image`
    and `question` unbound. Code is kept byte-for-byte; only comments added.
    """

    # presumably the original `model_mapping` class attribute — TODO confirm
    __UpperCamelCase = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def UpperCAmelCase__ ( self :Tuple , lowercase_ :int , lowercase_ :str , lowercase_ :Union[str, Any] ) -> Dict:
        # Builds a tiny random VQA pipeline plus two example payloads — one with
        # a PIL image, one with an image path — for the common pipeline harness.
        UpperCAmelCase = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        UpperCAmelCase = [
            {
                'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
                'question': 'How many cats are there?',
            },
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'question': 'How many cats are there?',
            },
        ]
        return vqa_pipeline, examples

    def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Tuple , lowercase_ :Union[str, Any] ) -> Optional[Any]:
        # Runs the pipeline on the examples and only shape-checks the output:
        # one [{'score', 'answer'}] list per example, values of any score/str.
        UpperCAmelCase = vqa_pipeline(lowercase_ , top_k=1 )
        self.assertEqual(
            lowercase_ , [
                [{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}],
                [{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}],
            ] , )

    @require_torch
    def UpperCAmelCase__ ( self :Dict ) -> Optional[Any]:
        # Small-model smoke test: both the kwargs form and the dict form of the
        # call must yield top-2 {'score', 'answer'} dicts.
        UpperCAmelCase = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        UpperCAmelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
        UpperCAmelCase = 'How many cats are there?'
        UpperCAmelCase = vqa_pipeline(image=lowercase_ , question='How many cats are there?' , top_k=2 )
        self.assertEqual(
            lowercase_ , [{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}, {'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}] )
        UpperCAmelCase = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            lowercase_ , [{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}, {'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}] )

    @slow
    @require_torch
    def UpperCAmelCase__ ( self :str ) -> Optional[Any]:
        # Full-model test against dandelin/vilt-b32-finetuned-vqa with exact
        # expected scores (rounded to 4 decimals), including batched input.
        UpperCAmelCase = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
        UpperCAmelCase = './tests/fixtures/tests_samples/COCO/000000039769.png'
        UpperCAmelCase = 'How many cats are there?'
        UpperCAmelCase = vqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        UpperCAmelCase = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        UpperCAmelCase = vqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )

    @require_tf
    @unittest.skip('Visual question answering not implemented in TF' )
    def UpperCAmelCase__ ( self :Tuple ) -> str:
        # TF backend has no VQA pipeline implementation yet.
        pass
| 78 |
"""simple docstring"""
import requests
# BUG FIX: both constants were bound to the mangled name `snake_case_` (the
# second overwriting the first) while the request helpers below read `APPID`
# and `URL_BASE`; the names the functions actually use are restored.
APPID = """"""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = """https://api.openweathermap.org/data/2.5/"""
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Fetch current weather for location `q` from the OpenWeatherMap API.

    Args:
        q: location query string (the API's `q` parameter).
        appid: OpenWeatherMap API key.

    Returns:
        The decoded JSON response as a dict.
    """
    # BUG FIX: the obfuscated def duplicated `lowercase_` for both parameters
    # (SyntaxError) and shadowed its siblings under `_lowerCAmelCase`; the name
    # matches the `current_weather(location)` call in the __main__ block.
    # NOTE: `params=locals()` deliberately forwards the parameter names as API
    # query keys — renaming a parameter changes the request.
    return requests.get(URL_BASE + 'weather', params=locals()).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Fetch the weather forecast for location `q` from the OpenWeatherMap API.

    Args:
        q: location query string (the API's `q` parameter).
        appid: OpenWeatherMap API key.

    Returns:
        The decoded JSON response as a dict.
    """
    # BUG FIX: duplicate `lowercase_` parameters (SyntaxError) replaced; the
    # parameter names double as API query keys via `params=locals()`.
    return requests.get(URL_BASE + 'forecast', params=locals()).json()
def weather_onecall(lat: float = 5_5.6_8, lon: float = 1_2.5_7, appid: str = APPID) -> dict:
    """Fetch the 'onecall' weather data for a latitude/longitude pair.

    Args:
        lat: latitude (default: Copenhagen).
        lon: longitude (default: Copenhagen).
        appid: OpenWeatherMap API key.

    Returns:
        The decoded JSON response as a dict.
    """
    # BUG FIX: duplicate `lowercase_` parameters (SyntaxError) replaced; the
    # parameter names double as API query keys via `params=locals()`.
    return requests.get(URL_BASE + 'onecall', params=locals()).json()


# Backward-compatible alias mirroring the obfuscated original, where the last
# `_lowerCAmelCase` definition (this function) won.
_lowerCAmelCase = weather_onecall
if __name__ == "__main__":
    from pprint import pprint

    # BUG FIX: the obfuscated original assigned the input to `snake_case_` but
    # then tested and used the never-defined `location` (NameError); one
    # consistent variable is used. Loops until an empty line is entered.
    while True:
        location = input("""Enter a location:""").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
| 78 | 1 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Tokenizer test suite for the Wav2Vec2 phoneme CTC tokenizer.

    NOTE(review): identifiers were machine-mangled. Test methods are all named
    `UpperCAmelCase__` (later definitions shadow earlier ones in the class
    namespace), many signatures repeat the parameter name `lowercase_` (a
    duplicate-argument SyntaxError as written), locals collapsed to
    `UpperCAmelCase`, and the two class attributes below share one name (the
    second overwrites the first — presumably `tokenizer_class` and
    `test_rust_tokenizer` originally). Code is kept byte-for-byte; only
    comments are added.
    """

    __UpperCamelCase = WavaVecaPhonemeCTCTokenizer
    __UpperCamelCase = False

    def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]:
        # setUp: write a small phoneme vocab + special-tokens map into tmpdir so
        # tokenizers can be instantiated from it in the tests below.
        super().setUp()
        UpperCAmelCase = (
            '<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
            'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
            'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
            'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
            'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
            'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
            'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
            'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
            'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
            'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
            'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
            'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
            'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
        ).split(' ' )
        UpperCAmelCase = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        UpperCAmelCase = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
        UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(lowercase_ ) + '\n' )

    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :str , lowercase_ :Optional[int]=False , lowercase_ :Any=20 , lowercase_ :List[Any]=5 ) -> Tuple[str, list]:
        # Helper: build a (text, ids) pair whose tokens round-trip through
        # encode/decode, clamped between min_length and max_length.
        UpperCAmelCase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase_ )) for i in range(len(lowercase_ ) )]
        UpperCAmelCase = list(filter(lambda lowercase_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase_ ) , lowercase_ ) )
        if max_length is not None and len(lowercase_ ) > max_length:
            UpperCAmelCase = toks[:max_length]
        if min_length is not None and len(lowercase_ ) < min_length and len(lowercase_ ) > 0:
            while len(lowercase_ ) < min_length:
                UpperCAmelCase = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase = tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ )
        if " " not in output_txt and len(lowercase_ ) > 1:
            UpperCAmelCase = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase_ )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase_ )
            )
        if with_prefix_space:
            UpperCAmelCase = ' ' + output_txt
        UpperCAmelCase = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        return output_txt, output_ids

    def UpperCAmelCase__ ( self :List[str] , **lowercase_ :List[Any] ) -> Union[str, Any]:
        # Helper: instantiate the tokenizer from the tmpdir written by setUp.
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )

    def UpperCAmelCase__ ( self :Union[str, Any] ) -> int:
        # Adding regular tokens: new tokens go to the end of the vocab; an
        # unknown phoneme sequence maps to <unk> (id 3).
        UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        # check adding a single token
        tokenizer.add_tokens('xxx' )
        UpperCAmelCase = tokenizer('m xxx ɪ' , do_phonemize=lowercase_ ).input_ids
        self.assertEqual(lowercase_ , [13, 3_92, 17] ) # xxx should be last token
        tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
        UpperCAmelCase = tokenizer('m aaa ɪ ccc' , do_phonemize=lowercase_ ).input_ids
        self.assertEqual(lowercase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
        UpperCAmelCase = tokenizer('maɪ c' , do_phonemize=lowercase_ ).input_ids
        self.assertEqual(lowercase_ , [3, 2_00] ) # mai should be <unk> (=3)

    def UpperCAmelCase__ ( self :Optional[int] ) -> Tuple:
        # phonemize() converts raw text to a space-separated phoneme string.
        UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        UpperCAmelCase = 'Hello how are you'
        UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
        self.assertEqual(lowercase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )

    def UpperCAmelCase__ ( self :List[Any] ) -> str:
        # Encoding raw text equals encoding the pre-phonemized string.
        UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        UpperCAmelCase = 'Hello how are you'
        UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
        self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids )

    def UpperCAmelCase__ ( self :List[str] ) -> Optional[Any]:
        # decode(encode(text)) reproduces the phonemized text.
        UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        UpperCAmelCase = 'Hello how are you'
        UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
        UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids )
        self.assertEqual(lowercase_ , lowercase_ )

    def UpperCAmelCase__ ( self :Any ) -> str:
        # batch_decode agrees with per-sample decode (CTC collapsing + padding).
        UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        UpperCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        UpperCAmelCase = tokenizer.decode(sample_ids[0] )
        UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
        self.assertEqual(lowercase_ , batch_tokens[0] )
        self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )

    def UpperCAmelCase__ ( self :List[Any] ) -> Optional[Any]:
        # With a word delimiter token, phonemize inserts "|" between words.
        UpperCAmelCase = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        UpperCAmelCase = 'Hello how are you'
        UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
        self.assertEqual(lowercase_ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )

    def UpperCAmelCase__ ( self :Any ) -> Optional[Any]:
        # Same encode equivalence as above, but with the word delimiter active.
        UpperCAmelCase = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        UpperCAmelCase = 'Hello how are you'
        UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
        self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids )

    def UpperCAmelCase__ ( self :Optional[Any] ) -> Any:
        # Decoding with/without filter_word_delimiter_token keeps or strips "|".
        UpperCAmelCase = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        # fmt: off
        UpperCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        UpperCAmelCase = tokenizer.decode(sample_ids[0] )
        UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
        self.assertEqual(lowercase_ , batch_tokens[0] )
        self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
        # decode with no word_del_token filter
        UpperCAmelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase_ )
        UpperCAmelCase = tokenizer.batch_decode(lowercase_ , filter_word_delimiter_token=lowercase_ )
        self.assertEqual(lowercase_ , batch_tokens[0] )
        self.assertEqual(lowercase_ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )

    def UpperCAmelCase__ ( self :Dict ) -> List[Any]:
        # Round trip with delimiter filtering disabled reproduces phonemize().
        UpperCAmelCase = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        UpperCAmelCase = 'Hello how are you'
        UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
        UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
        self.assertEqual(lowercase_ , lowercase_ )

    def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
        # Round trip with delimiter filtering compared to manually stripping "|".
        UpperCAmelCase = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        UpperCAmelCase = 'Hello how are you'
        UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
        UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
        self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , lowercase_ )

    def UpperCAmelCase__ ( self :Union[str, Any] ) -> str:
        # Per-call phonemizer_lang switches phonemization (en-us vs fr-fr).
        UpperCAmelCase = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=lowercase_ )
        UpperCAmelCase = 'Hello how are you'
        UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='en-us' ).input_ids
        UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='fr-fr' ).input_ids
        self.assertNotEqual(lowercase_ , lowercase_ )
        UpperCAmelCase = tokenizer.decode(lowercase_ )
        UpperCAmelCase = tokenizer.decode(lowercase_ )
        self.assertEqual(lowercase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
        self.assertEqual(lowercase_ , 'ɛ l o h aʊ a ʁ j u' )

    def UpperCAmelCase__ ( self :Tuple ) -> Optional[Any]:
        # Tokenization is case-insensitive: mixed case == lower case.
        UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        UpperCAmelCase = 'Hello how Are you'
        UpperCAmelCase = 'hello how are you'
        UpperCAmelCase = tokenizer(lowercase_ ).input_ids
        UpperCAmelCase = tokenizer(lowercase_ ).input_ids
        self.assertEqual(lowercase_ , lowercase_ )

    def UpperCAmelCase__ ( self :int ) -> List[Any]:
        # Added tokens and special tokens survive CTC-style batch decoding.
        UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        tokenizer.add_tokens(['!', '?'] )
        tokenizer.add_special_tokens({'cls_token': '$$$'} )
        # fmt: off
        UpperCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
        ]
        # fmt: on
        UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
        self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )

    @staticmethod
    def UpperCAmelCase__ ( lowercase_ :Tuple , lowercase_ :str ) -> List[Any]:
        # Helper: project a list of offset dicts onto one key.
        UpperCAmelCase = [d[key] for d in offsets]
        return retrieved_list

    def UpperCAmelCase__ ( self :List[Any] ) -> str:
        # output_char_offsets=True returns per-character start/end frame offsets
        # consistent with the decoded text.
        UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        UpperCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        UpperCAmelCase = tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ , filter_word_delimiter_token=lowercase_ )
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys() ) , 2 )
        self.assertTrue('text' in outputs )
        self.assertTrue('char_offsets' in outputs )
        self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
        self.assertListEqual(
            self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
        self.assertListEqual(
            self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )

    def UpperCAmelCase__ ( self :List[str] ) -> Tuple:
        # batch_decode with char offsets matches per-sample decode with offsets.
        UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' )

        def check_list_tuples_equal(lowercase_ :int , lowercase_ :List[str] ):
            self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
            self.assertTrue(isinstance(outputs_list[0] , lowercase_ ) )
            # transform list to ModelOutput
            UpperCAmelCase = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )

            def recursive_check(lowercase_ :Dict , lowercase_ :Union[str, Any] ):
                if isinstance(lowercase_ , lowercase_ ):
                    [recursive_check(lowercase_ , lowercase_ ) for la, la in zip(lowercase_ , lowercase_ )]
                self.assertEqual(lowercase_ , lowercase_ )

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )

        # fmt: off
        UpperCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        UpperCAmelCase = tokenizer.batch_decode(lowercase_ , output_char_offsets=lowercase_ )
        UpperCAmelCase = [tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ ) for ids in sample_ids]
        check_list_tuples_equal(lowercase_ , lowercase_ )

    @unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
    def UpperCAmelCase__ ( self :Dict ) -> Optional[int]:
        pass

    @unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
    def UpperCAmelCase__ ( self :Tuple ) -> str:
        pass

    @unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
    def UpperCAmelCase__ ( self :int ) -> List[Any]:
        pass

    @unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
    def UpperCAmelCase__ ( self :Tuple ) -> List[Any]:
        pass

    def UpperCAmelCase__ ( self :List[str] ) -> Optional[Any]:
        # Adding tokens and special tokens grows the vocab and yields encodable
        # ids past the original vocab size; eos/pad land where expected.
        UpperCAmelCase = self.get_tokenizers(do_lower_case=lowercase_ )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                UpperCAmelCase = tokenizer.vocab_size
                UpperCAmelCase = len(lowercase_ )
                self.assertNotEqual(lowercase_ , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                UpperCAmelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                UpperCAmelCase = tokenizer.add_tokens(lowercase_ )
                UpperCAmelCase = tokenizer.vocab_size
                UpperCAmelCase = len(lowercase_ )
                self.assertNotEqual(lowercase_ , 0 )
                self.assertEqual(lowercase_ , lowercase_ )
                self.assertEqual(lowercase_ , len(lowercase_ ) )
                self.assertEqual(lowercase_ , all_size + len(lowercase_ ) )
                UpperCAmelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowercase_ )
                self.assertGreaterEqual(len(lowercase_ ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                UpperCAmelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                UpperCAmelCase = tokenizer.add_special_tokens(lowercase_ )
                UpperCAmelCase = tokenizer.vocab_size
                UpperCAmelCase = len(lowercase_ )
                self.assertNotEqual(lowercase_ , 0 )
                self.assertEqual(lowercase_ , lowercase_ )
                self.assertEqual(lowercase_ , len(lowercase_ ) )
                self.assertEqual(lowercase_ , all_size_a + len(lowercase_ ) )
                UpperCAmelCase = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowercase_ )
                self.assertGreaterEqual(len(lowercase_ ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )

    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
    def UpperCAmelCase__ ( self :Any ) -> Union[str, Any]:
        pass

    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
    def UpperCAmelCase__ ( self :Optional[int] ) -> List[str]:
        pass

    def UpperCAmelCase__ ( self :Dict ) -> int:
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        UpperCAmelCase = self.get_tokenizers(fast=lowercase_ , do_lower_case=lowercase_ )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                UpperCAmelCase = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
                UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase_ )
                self.assertIsInstance(output['text'] , lowercase_ )
| 78 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""image_processor""", """tokenizer"""]
__UpperCamelCase = """LayoutLMv2ImageProcessor"""
__UpperCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
    """Build the processor from an image processor and a tokenizer.

    Args:
        image_processor: the LayoutLMv2 image processor instance.
        tokenizer: a LayoutXLM (fast or slow) tokenizer instance.
        **kwargs: may carry the deprecated ``feature_extractor`` argument,
            accepted as a fallback for ``image_processor``.

    Raises:
        ValueError: if no image processor or no tokenizer is provided.
    """
    # BUG FIXES vs the obfuscated original: (1) the signature repeated
    # `lowercase_` for every parameter (duplicate-argument SyntaxError) — names
    # are reconstructed from the body (`kwargs.pop`, `image_processor`,
    # `tokenizer`); (2) `warnings.warn` received the kwargs dict as its warning
    # category instead of FutureWarning; (3) `feature_extractor` was only bound
    # inside the `if`, so the fallback path raised NameError instead of the
    # intended ValueError — it is now initialized to None first.
    feature_extractor = None
    if "feature_extractor" in kwargs:
        warnings.warn(
            'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
            ' instead.', FutureWarning, )
        feature_extractor = kwargs.pop('feature_extractor')
    image_processor = image_processor if image_processor is not None else feature_extractor
    if image_processor is None:
        raise ValueError('You need to specify an `image_processor`.')
    if tokenizer is None:
        raise ValueError('You need to specify a `tokenizer`.')
    super().__init__(image_processor, tokenizer)
def __call__( self :str , lowercase_ :Optional[int] , lowercase_ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ :Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ :Optional[Union[List[int], List[List[int]]]] = None , lowercase_ :bool = True , lowercase_ :Union[bool, str, PaddingStrategy] = False , lowercase_ :Union[bool, str, TruncationStrategy] = None , lowercase_ :Optional[int] = None , lowercase_ :int = 0 , lowercase_ :Optional[int] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = True , lowercase_ :Optional[Union[str, TensorType]] = None , **lowercase_ :Any , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase = features['words']
UpperCAmelCase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel values
UpperCAmelCase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
UpperCAmelCase = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] )
UpperCAmelCase = images
return encoded_inputs
def UpperCAmelCase__ ( self :Dict , lowercase_ :List[Any] , lowercase_ :Any ) -> Optional[Any]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(lowercase_ )} and {len(lowercase_ )}""" )
return images_with_overflow
def UpperCAmelCase__ ( self :Any , *lowercase_ :int , **lowercase_ :Tuple ) -> Tuple:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCAmelCase__ ( self :Any , *lowercase_ :List[Any] , **lowercase_ :Optional[int] ) -> Optional[Any]:
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCAmelCase__ ( self :int ) -> Optional[int]:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase__ ( self :int ) -> Dict:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
return self.image_processor
| 78 | 1 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
# Matches "MAJOR.MINOR.PATCH"; named `_VERSION_REG` because that is the name
# `_str_to_version_tuple` below actually reads (the previous binding name was never used).
_VERSION_REG = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class A_ :
    """Dataset version ``MAJOR.MINOR.PATCH``.

    ``major``/``minor``/``patch`` are derived from ``version_str`` in
    ``__post_init__``; instances compare, order and hash by that numeric triple
    and can be compared directly against plain ``"x.y.z"`` strings.

    Fixes over the previous revision: the dataclass fields had lost their names
    and annotations (so the generated ``__init__`` had no parameters),
    ``__post_init__`` had been renamed away, and the parsed tuple was never
    assigned to ``self.major``/``minor``/``patch``.
    """

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self) -> None:
        # Derive the numeric components from the canonical string form.
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self) -> str:
        return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""

    @property
    def tuple(self):
        """The version as a ``(major, minor, patch)`` tuple."""
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        # Accept raw version strings as well as version instances.
        if isinstance(other, str):
            return A_(other)
        elif isinstance(other, A_):
            return other
        raise TypeError(f"""{other} (type {type(other )}) cannot be compared to version.""" )

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            # Unparseable/incompatible operands are simply "not equal".
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple ) )

    @classmethod
    def from_dict(cls, dic):
        """Build a version from a dict, silently ignoring unknown keys."""
        field_names = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )

    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    """Parse an "x.y.z" string into a tuple of three ints.

    Raises ValueError when the string does not match ``_VERSION_REG``.
    Fixes: the generator converted the whole input (``int(version_str)``)
    instead of each matched group, the error message referenced an undefined
    name, and the function name did not match its caller (``Version.__post_init__``).
    """
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(F"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
    return tuple(int(v) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def _version_tuple_to_str(version_tuple):
    """Join a (major, minor, patch) tuple back into an "x.y.z" string.

    Fixes: the join previously stringified the whole tuple for every element
    (``str(version_tuple)``) instead of each component, and the function name
    did not match its caller (``Version.__hash__``).
    """
    return ".".join(str(v) for v in version_tuple)
| 78 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
    """Directed, weighted graph.

    Stored as an adjacency dict mapping each vertex to a list of
    ``[weight, neighbor]`` pairs.  The traversal helpers accept ``-2`` as a
    sentinel meaning "start from the first vertex ever inserted".

    Fixes over the previous revision: every assignment bound a throwaway name
    while later lines read the intended one (``self.graph``, ``s``, ``ss``,
    ``stack`` …), and the method names referenced internally
    (``self.add_pair``/``self.dfs``/``self.bfs``) did not exist.
    """

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add a directed edge ``u -> v`` with weight ``w`` (exact duplicates ignored)."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            # ensure the target vertex exists even if it has no outgoing edges
            self.graph[v] = []

    def all_nodes(self):
        """Return every vertex."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge ``u -> v`` if present (at most one exists, see add_pair)."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s``.

        Returns the visited vertices in visit order; stops early as soon as
        ``d`` is reached.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # descend into the first unvisited child, if any
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # all children visited -> backtrack
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` random vertices (random count when c == -1)."""
        if c == -1:
            c = floor(random() * 1_00_00) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s``; returns vertices in visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing into ``u``."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological ordering from ``s`` (graph assumed acyclic)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # a vertex is finished (emitted) once every child has been visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the vertices that take part in some cycle (possibly empty)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a visited vertex that is an indirect ancestor closes a cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as any cycle is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class A_ :
    """Undirected, weighted graph.

    Stored as an adjacency dict mapping each vertex to a list of
    ``[weight, neighbor]`` pairs; each edge is recorded in both directions.
    Traversal helpers accept ``-2`` as a sentinel meaning "start from the
    first vertex ever inserted".

    Fixes over the previous revision: every assignment bound a throwaway name
    while later lines read the intended one (``self.graph``, ``s``, ``ss``,
    ``stack`` …), and the method names referenced internally
    (``self.add_pair``/``self.dfs``/``self.bfs``) did not exist.
    """

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add an undirected edge ``u <-> v`` with weight ``w`` (duplicates ignored)."""
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge between ``u`` and ``v`` in both directions, if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s``; stops early if ``d`` is reached."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # descend into the first unvisited neighbor, if any
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # all neighbors visited -> backtrack
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` random vertices (random count when c == -1)."""
        if c == -1:
            c = floor(random() * 1_00_00) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s``; returns vertices in visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the vertices that take part in some cycle (possibly empty)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a visited vertex that is an indirect ancestor closes a cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as any cycle is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every vertex."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 78 | 1 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Any ) -> Tuple:
UpperCAmelCase = 0
@slow
def UpperCAmelCase__ ( self :Dict ) -> int:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
def UpperCAmelCase__ ( self :List[Any] ) -> List[Any]:
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def UpperCAmelCase__ ( self :Dict ) -> str:
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def UpperCAmelCase__ ( self :Optional[int] ) -> int:
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , config=lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def UpperCAmelCase__ ( self :Any ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowercase_ , 'vocab.txt' ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='bert' , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowercase_ , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowercase_ , 'merges.txt' ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='gpt2' , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@require_tokenizers
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowercase_ , 'vocab.txt' ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='bert' )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowercase_ , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowercase_ , 'merges.txt' ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='gpt2' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Any ) -> List[Any]:
with pytest.raises(lowercase_ ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def UpperCAmelCase__ ( self :List[str] ) -> List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase_ , lowercase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase_ )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def UpperCAmelCase__ ( self :str ) -> Any:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase_ , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
UpperCAmelCase = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def UpperCAmelCase__ ( self :Any ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase = TOKENIZER_MAPPING.values()
UpperCAmelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase_ )
@require_tokenizers
def UpperCAmelCase__ ( self :Dict ) -> Dict:
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowercase_ ) , lowercase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , lowercase_ )
@require_tokenizers
def UpperCAmelCase__ ( self :Union[str, Any] ) -> str:
UpperCAmelCase = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=lowercase_ )
UpperCAmelCase = 'Hello, world. How are you?'
UpperCAmelCase = tokenizer.tokenize(lowercase_ )
self.assertEqual('[UNK]' , tokens[0] )
UpperCAmelCase = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=lowercase_ )
UpperCAmelCase = tokenizer.tokenize(lowercase_ )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def UpperCAmelCase__ ( self :int ) -> str:
UpperCAmelCase = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(lowercase_ ) , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def UpperCAmelCase__ ( self :int ) -> Tuple:
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def UpperCAmelCase__ ( self :Dict ) -> Optional[int]:
UpperCAmelCase = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :str ) -> Optional[int]:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase = get_tokenizer_config('bert-base-cased' )
UpperCAmelCase = config.pop('_commit_hash' , lowercase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase_ , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase = get_tokenizer_config(lowercase_ )
self.assertDictEqual(lowercase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = get_tokenizer_config(lowercase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def UpperCAmelCase__ ( self :Dict ) -> Tuple:
try:
AutoConfig.register('custom' , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
UpperCAmelCase = CustomTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCAmelCase__ ( self :Tuple ) -> List[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
# Can register in two steps
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase_ , slow_tokenizer_class=lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = BertTokenizerFast.from_pretrained(lowercase_ )
bert_tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = CustomTokenizerFast.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self :Dict ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
    @require_tokenizers
    def UpperCAmelCase__ ( self :int ) -> Optional[int]:
        """Check that a locally registered custom config/tokenizer pair takes
        priority over Hub code unless ``trust_remote_code`` is explicitly enabled."""
        # NOTE(review): both nested classes are bound to the same name `A_`, so
        # the second definition shadows the first; they were presumably a custom
        # config class and a custom fast-tokenizer class before mechanical
        # renaming -- verify against the upstream test. The repeated
        # `__UpperCamelCase` assignments likewise shadow each other.
        class A_ ( SCREAMING_SNAKE_CASE_ ):
            """simple docstring"""
            __UpperCamelCase = False
        class A_ ( SCREAMING_SNAKE_CASE_ ):
            """simple docstring"""
            __UpperCamelCase = NewTokenizer
            __UpperCamelCase = False
        try:
            # Register the (config, slow tokenizer, fast tokenizer) triple locally.
            AutoConfig.register('custom' , lowercase_ )
            AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
            AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
            # If remote code is not set, the default is to use local
            # NOTE(review): results below are assigned to the throwaway name
            # `UpperCAmelCase` but asserted via `tokenizer`; the assignment
            # targets appear mechanically mangled -- confirm upstream.
            UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
            self.assertFalse(tokenizer.special_attribute_present )
            UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=lowercase_ )
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ )
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
            self.assertFalse(tokenizer.special_attribute_present )
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ )
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
            self.assertTrue(tokenizer.special_attribute_present )
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            # Always de-register so later tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def UpperCAmelCase__ ( self :Tuple ) -> List[str]:
        """Load a legacy dynamic tokenizer from the Hub, in both fast and
        (when tokenizers is installed) slow variants."""
        # NOTE(review): results are assigned to the throwaway name
        # `UpperCAmelCase` but asserted via `tokenizer`; assignment targets
        # appear mechanically mangled -- confirm upstream.
        UpperCAmelCase = AutoTokenizer.from_pretrained(
            'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowercase_ )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
            # Test we can also load the slow version
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
    def UpperCAmelCase__ ( self :List[Any] ) -> List[Any]:
        """An invalid model identifier should raise with a helpful message."""
        with self.assertRaisesRegex(
            lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
            UpperCAmelCase = AutoTokenizer.from_pretrained('bert-base' )
    def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]:
        """An unknown git revision should raise with a helpful message."""
        with self.assertRaisesRegex(
            lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , revision='aaaaaa' )
    def UpperCAmelCase__ ( self :Tuple ) -> Tuple:
        """A second `from_pretrained` on a cached model should trigger a single
        HEAD request (freshness check) and no other network traffic."""
        # Make sure we have cached the tokenizer.
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
        with RequestCounter() as counter:
            UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # Cache hit: no GET downloads, exactly one HEAD, nothing else.
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 78 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 78 | 1 |
"""simple docstring"""
class A_ :
    """Fixed-capacity circular (ring-buffer) FIFO queue backed by a list.

    Rewritten because the original bound all four operations to the same
    mangled method name (so only the last survived) and discarded every
    attribute assignment, making the class unusable.
    """

    def __init__( self , capacity ):
        # Capacity and backing store; ``front`` indexes the oldest element,
        # ``rear`` the next free slot.
        self.n = capacity
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__( self ) -> int:
        return self.size

    def is_empty( self ) -> bool:
        return self.size == 0

    def first( self ):
        """Return the oldest element without removing it (False when empty)."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue( self , data ):
        """Append ``data``; raises when the buffer is full. Returns self."""
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue( self ):
        """Remove and return the oldest element; raises when empty."""
        if self.size == 0:
            raise Exception('UNDERFLOW' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 78 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ):
UpperCAmelCase = []
UpperCAmelCase = 0
for index, char in enumerate(lowercase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase = index + 1
elif index + 1 == len(lowercase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 78 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _lowerCAmelCase ( lowercase_ = 8 ):
UpperCAmelCase = ascii_letters + digits + punctuation
return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) )
def _lowerCAmelCase ( chars_incl , i ):
    """Build a length-``i`` password guaranteed to contain every character of
    ``chars_incl``, padded with random letters, digits and punctuation.

    Relies on the module-level ``random(chars, n)`` helper. Fixes the
    original's duplicate parameter names (SyntaxError) and mangled locals.
    """
    # Remaining length after the mandatory characters, split roughly evenly
    # between letters, digits and punctuation (letters absorb the remainder).
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    chars_list = list(chars )
    shuffle(chars_list )
    return "".join(chars_list )
# random is a generalised function for letters, characters and numbers
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
return "".join(secrets.choice(lowercase_ ) for _ in range(lowercase_ ) )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
pass # Put your code here...
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
pass # Put your code here...
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
pass # Put your code here...
def _lowerCAmelCase ( lowercase_ , lowercase_ = 8 ):
if len(lowercase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCAmelCase = any(char in ascii_uppercase for char in password )
UpperCAmelCase = any(char in ascii_lowercase for char in password )
UpperCAmelCase = any(char in digits for char in password )
UpperCAmelCase = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _lowerCAmelCase ( ):
    """Interactive driver: prompt for a max length and mandatory characters,
    then print both kinds of generated password.

    Fixes the mangled local names (the originals were discarded into a
    throwaway variable, leaving undefined references).
    """
    max_length = int(input('Please indicate the max length of your password: ' ).strip() )
    chars_incl = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' , password_generator(max_length ) )
    print(
        'Alternative Password generated:' , alternative_password_generator(chars_incl , max_length ) , )
    print('[If you are thinking of using this passsword, You better save it.]' )


if __name__ == "__main__":
    main()
| 78 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging once at import time: timestamped, PID-tagged, INFO.
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
    datefmt="""%m/%d/%Y %H:%M:%S""",
    level=logging.INFO,
)
# NOTE(review): assigned to the throwaway name `snake_case_`, yet the
# functions below read `logger`; the target appears mechanically mangled.
snake_case_ = logging.getLogger(__name__)
def _lowerCAmelCase ( lowercase_ ):
    """Dump the current git repository state (repo id, HEAD sha, active
    branch) to ``<lowercase_>/git_log.json``.

    lowercase_: destination folder for the JSON file.

    Fixes the mangled locals (repo/repo_infos were discarded, leaving
    undefined references) and restores ``search_parent_directories=True``,
    which the mangling had replaced with the destination folder.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
    }
    with open(os.path.join(lowercase_ , 'git_log.json' ) , 'w' ) as f:
        json.dump(repo_infos , f , indent=4 )
def _lowerCAmelCase ( lowercase_ ):
    """Initialize single/multi-GPU (and optionally multi-node) distributed
    settings on the given argument namespace, then select the CUDA device and
    init the NCCL process group."""
    # NOTE(review): the body reads `params` although the parameter is named
    # `lowercase_`, and every result is assigned to the throwaway name
    # `UpperCAmelCase` (evidently `params.*` attributes before mechanical
    # renaming); restore from the upstream distillation script before running.
    if params.n_gpu <= 0:
        # CPU-only run: zero out all distributed bookkeeping and stop early.
        UpperCAmelCase = 0
        UpperCAmelCase = -1
        UpperCAmelCase = True
        UpperCAmelCase = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs' )
    if params.n_gpu > 1:
        # Multi-GPU: read the topology exported by the distributed launcher.
        assert params.local_rank != -1
        UpperCAmelCase = int(os.environ['WORLD_SIZE'] )
        UpperCAmelCase = int(os.environ['N_GPU_NODE'] )
        UpperCAmelCase = int(os.environ['RANK'] )
        # number of nodes / node ID
        UpperCAmelCase = params.world_size // params.n_gpu_per_node
        UpperCAmelCase = params.global_rank // params.n_gpu_per_node
        UpperCAmelCase = True
        assert params.n_nodes == int(os.environ['N_NODES'] )
        assert params.node_id == int(os.environ['NODE_RANK'] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        UpperCAmelCase = 1
        UpperCAmelCase = 0
        UpperCAmelCase = 0
        UpperCAmelCase = 0
        UpperCAmelCase = 1
        UpperCAmelCase = 1
        UpperCAmelCase = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    UpperCAmelCase = params.node_id == 0 and params.local_rank == 0
    UpperCAmelCase = params.n_nodes > 1
    # summary
    UpperCAmelCase = F"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
    logger.info(PREFIX + 'Node ID : %i' % params.node_id )
    logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
    logger.info(PREFIX + 'World size : %i' % params.world_size )
    logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
    logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
    logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
    logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
    logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
    # set GPU device
    torch.cuda.set_device(params.local_rank )
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed' )
        torch.distributed.init_process_group(
            init_method='env://' , backend='nccl' , )
def _lowerCAmelCase ( lowercase_ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 78 | 1 |
"""simple docstring"""
import math
class A_ :
    """Minimal two-cluster self-organizing map (competitive learning).

    Rewritten because the original bound both methods to one mangled name
    (the second shadowed the first), used duplicate parameter names (a
    SyntaxError), and accumulated both cluster distances into the same
    variable so the comparison was always false. Method names restored to
    the ones the driver in this file calls (``get_winner`` / ``update``).
    """

    def get_winner( self , weights , sample ) -> int:
        """Return the index (0 or 1) of the weight vector closest to ``sample``."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
        # Squared Euclidean distances; the smaller one wins.
        return 0 if d0 > d1 else 1

    def update( self , weights , sample , j , alpha ):
        """Move winner ``weights[j]`` toward ``sample`` by learning rate
        ``alpha``; mutates and returns ``weights``."""
        for i in range(len(weights[j] ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def _lowerCAmelCase ( ):
    """Train the two-cluster SOM on four 4-d samples for 3 epochs, then
    classify a test sample and print the result."""
    # NOTE(review): this driver calls `SelfOrganizingMap()` / `get_winner` /
    # `update` / `main`, but the class in this chunk is named `A_` and every
    # result is assigned to the throwaway name `UpperCAmelCase`; the
    # identifiers appear mechanically renamed -- restore before running.
    # Training Examples ( m, n )
    UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    UpperCAmelCase = SelfOrganizingMap()
    UpperCAmelCase = 3
    UpperCAmelCase = 0.5
    for _ in range(lowercase_ ):
        for j in range(len(lowercase_ ) ):
            # training sample
            UpperCAmelCase = training_samples[j]
            # Compute the winning vector
            UpperCAmelCase = self_organizing_map.get_winner(lowercase_ , lowercase_ )
            # Update the winning vector
            UpperCAmelCase = self_organizing_map.update(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
    # classify test sample
    UpperCAmelCase = [0, 0, 0, 1]
    UpperCAmelCase = self_organizing_map.get_winner(lowercase_ , lowercase_ )
    # results
    print(F"""Clusters that the test sample belongs to : {winner}""" )
    print(F"""Weights that have been trained : {weights}""" )


# running the main() function
if __name__ == "__main__":
    main()
| 78 |
"""simple docstring"""
import os
import time

import numpy as np
import onnxruntime as ort

# Micro-benchmark: run a BERT-style ONNX model 2000 times and report the
# average latency.
# NOTE(review): every module-level assignment below targets the throwaway name
# `snake_case_`, while later code reads `sess_opt`, `execution_provider`,
# `sess`, `run_opt`, `input_ids`, `attention_mask`, `token_type_ids`, `batch`,
# `sequence`, `start_time` and `max_iters`; the assignment targets appear
# mechanically mangled and must be restored before this script can run.
# The first three string flags presumably set environment variables -- confirm
# against the upstream benchmark script.
snake_case_ = """1"""
snake_case_ = """0"""
snake_case_ = """1"""
# Disable all graph optimizations so the raw graph is measured.
snake_case_ = ort.SessionOptions()
snake_case_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
# Prefer TensorRT, fall back to CUDA.
snake_case_ = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
snake_case_ = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
snake_case_ = ort.RunOptions()
# Fixed-shape dummy inputs: sequence length 128, batch 1.
# NOTE(review): `np.intaa` is not a numpy dtype (presumably `np.int64` before
# renaming) -- fix before running.
snake_case_ = 128
snake_case_ = 1
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
print("""Warm up phase...""")
# One untimed run so lazy initialization is excluded from the measurement.
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print("""Start inference...""")
snake_case_ = time.time()
snake_case_ = 2000
snake_case_ = {}
# NOTE(review): `iter` shadows the builtin; the per-run result is unused.
for iter in range(max_iters):
    snake_case_ = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
# Average wall-clock latency per run, in milliseconds.
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
| 78 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
# Import torch / XLA only when the backends are actually present.
if is_torch_available():
    import torch
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
# NOTE(review): assigned to the throwaway name `snake_case_`, yet the class
# below reads `logger`; the target appears mechanically mangled.
snake_case_ = logging.get_logger(__name__)
@dataclass
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """PyTorch-specific benchmark arguments: translates deprecated `no_*`
    flags and resolves the torch device / GPU count lazily."""
    # Deprecated boolean CLI flags; `__init__` translates e.g. `no_cuda=True`
    # into the positive form with a warning.
    __UpperCamelCase = [
        """no_inference""",
        """no_cuda""",
        """no_tpu""",
        """no_speed""",
        """no_memory""",
        """no_env_print""",
        """no_multi_process""",
    ]
    def __init__( self :List[str] , **lowercase_ :str ) -> Dict:
        """Translate deprecated kwargs, pop the torch-specific ones, then
        defer to the base benchmark-arguments initializer."""
        # NOTE(review): the body reads `kwargs`/`positive_arg` although the
        # packed parameter is `**lowercase_`, and the popped values are
        # assigned to the throwaway name `UpperCAmelCase` (evidently
        # `self.torchscript` / `self.torch_xla_tpu_print_metrics` /
        # `self.fp16_opt_level`); names appear mechanically mangled.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                UpperCAmelCase = deprecated_arg[3:]
                setattr(self , lowercase_ , not kwargs.pop(lowercase_ ) )
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        UpperCAmelCase = kwargs.pop('torchscript' , self.torchscript )
        UpperCAmelCase = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
        UpperCAmelCase = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
        super().__init__(**lowercase_ )
    # NOTE(review): the three field declarations below share one name, so only
    # the last survives at class level -- presumably `torchscript`,
    # `torch_xla_tpu_print_metrics` and `fp16_opt_level` before renaming.
    __UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Trace the models using torchscript"""} )
    __UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
    __UpperCamelCase = field(
        default="""O1""" , metadata={
            """help""": (
                """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
                """See details at https://nvidia.github.io/apex/amp.html"""
            )
        } , )
    @cached_property
    def UpperCAmelCase__ ( self :Any ) -> Tuple["torch.device", int]:
        """Pick the torch device (CPU / TPU / CUDA) and count visible GPUs;
        cached so the choice is made once."""
        requires_backends(self , ['torch'] )
        logger.info('PyTorch: setting up devices' )
        if not self.cuda:
            UpperCAmelCase = torch.device('cpu' )
            UpperCAmelCase = 0
        elif is_torch_tpu_available():
            UpperCAmelCase = xm.xla_device()
            UpperCAmelCase = 0
        else:
            UpperCAmelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
            UpperCAmelCase = torch.cuda.device_count()
        return device, n_gpu
    # NOTE(review): the five properties below all share the name
    # `UpperCAmelCase__`, so each definition shadows the previous one and only
    # the last remains reachable; they were evidently distinct accessors
    # (TPU flag, current device index, device, GPU count, GPU flag) before
    # mechanical renaming -- restore before use.
    @property
    def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]:
        return is_torch_tpu_available() and self.tpu
    @property
    def UpperCAmelCase__ ( self :List[str] ) -> int:
        requires_backends(self , ['torch'] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def UpperCAmelCase__ ( self :Tuple ) -> "torch.device":
        requires_backends(self , ['torch'] )
        return self._setup_devices[0]
    @property
    def UpperCAmelCase__ ( self :Dict ) -> Dict:
        requires_backends(self , ['torch'] )
        return self._setup_devices[1]
    @property
    def UpperCAmelCase__ ( self :Optional[int] ) -> List[str]:
        return self.n_gpu > 0
| 78 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# PIL is optional; import it only when the vision extra is installed.
if is_vision_available():
    import PIL
# NOTE(review): assigned to the throwaway name `snake_case_` (presumably
# `logger` before mechanical renaming).
snake_case_ = logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Image processor: optional RGB conversion, resize to a fixed
    {height, width}, rescale, per-channel normalization (CLIP mean/std
    defaults), packed into a BatchFeature of `pixel_values`."""
    # NOTE(review): every parameter in the signatures below is named
    # `lowercase_` -- duplicate arguments are a SyntaxError in Python -- and
    # each `UpperCAmelCase = ...` line discards a value evidently meant for a
    # `self.*` attribute or local; the identifiers appear mechanically
    # mangled and must be restored before this class can be used.
    __UpperCamelCase = ["""pixel_values"""]
    def __init__( self :int , lowercase_ :bool = True , lowercase_ :Dict[str, int] = None , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :bool = True , lowercase_ :Union[int, float] = 1 / 2_55 , lowercase_ :bool = True , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :bool = True , **lowercase_ :Union[str, Any] , ) -> None:
        """Store default preprocessing settings (size defaults to 384x384)."""
        super().__init__(**lowercase_ )
        UpperCAmelCase = size if size is not None else {'height': 3_84, 'width': 3_84}
        UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        UpperCAmelCase = do_resize
        UpperCAmelCase = size
        UpperCAmelCase = resample
        UpperCAmelCase = do_rescale
        UpperCAmelCase = rescale_factor
        UpperCAmelCase = do_normalize
        UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
        UpperCAmelCase = do_convert_rgb
    # NOTE(review): the four methods below all share the name
    # `UpperCAmelCase__`, so each definition shadows the previous one --
    # evidently `resize`, `rescale`, `normalize` and `preprocess` before
    # mechanical renaming (the last method calls them via `self.resize` etc.).
    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :np.ndarray , lowercase_ :Dict[str, int] , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Any , ) -> np.ndarray:
        """Resize an image to the exact {height, width} given in `size`."""
        UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        UpperCAmelCase = (size['height'], size['width'])
        return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :np.ndarray , lowercase_ :Union[int, float] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[int] , ) -> int:
        """Scale pixel values by `scale` (e.g. 1/255)."""
        return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
    def UpperCAmelCase__ ( self :Any , lowercase_ :np.ndarray , lowercase_ :Union[float, List[float]] , lowercase_ :Union[float, List[float]] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[Any] , ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :ImageInput , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Dict[str, int]] = None , lowercase_ :PILImageResampling = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[float] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[str, TensorType]] = None , lowercase_ :bool = None , lowercase_ :ChannelDimension = ChannelDimension.FIRST , **lowercase_ :Tuple , ) -> PIL.Image.Image:
        """Full pipeline: validate, optionally convert to RGB, resize,
        rescale, normalize, set channel layout, pack into a BatchFeature."""
        UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase = resample if resample is not None else self.resample
        UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase = image_std if image_std is not None else self.image_std
        UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        UpperCAmelCase = size if size is not None else self.size
        UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        UpperCAmelCase = make_list_of_images(lowercase_ )
        if not valid_images(lowercase_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # `(do_resize and size is None) or (resample is None)` -- probably
        # meant `do_resize and (size is None or resample is None)`; confirm.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            UpperCAmelCase = [convert_to_rgb(lowercase_ ) for image in images]
        # All transformations expect numpy arrays.
        UpperCAmelCase = [to_numpy_array(lowercase_ ) for image in images]
        if do_resize:
            UpperCAmelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
        if do_rescale:
            UpperCAmelCase = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
        if do_normalize:
            UpperCAmelCase = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
        UpperCAmelCase = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
        UpperCAmelCase = BatchFeature(data={'pixel_values': images} , tensor_type=lowercase_ )
        return encoded_outputs
| 78 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowercase_ ) )
def _lowerCAmelCase ( graph , max_colors , colored_vertices , index ):
    """Backtracking helper: try to color vertices ``index``..end with at most
    ``max_colors`` colors, mutating ``colored_vertices`` in place.

    Fixes the original's duplicated parameter names (a SyntaxError); the
    recursive call targets this function (pre-rename name ``util_color``).
    Relies on the sibling ``valid_coloring`` helper.
    """
    # Base Case: every vertex has been colored successfully.
    if index == len(graph ):
        return True
    # Recursive Step: try each color on the current vertex.
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if _lowerCAmelCase(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def _lowerCAmelCase ( graph , max_colors ):
    """Return a valid graph coloring (one color index per vertex) using at
    most ``max_colors`` colors, or [] when none exists.

    Fixes the original's duplicated parameter names (a SyntaxError).
    Relies on the sibling ``util_color`` backtracking helper.
    """
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
| 78 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level values below are assigned to the same
# throwaway name `snake_case_` -- evidently the logger and the pretrained
# config archive map before mechanical renaming; only the dict survives.
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
    """microsoft/beit-base-patch16-224-pt22k""": (
        """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration holding BEiT encoder, masked-image-modeling and
    semantic-segmentation-head hyper-parameters."""
    # Model-type key used for auto-class registration.
    __UpperCamelCase = """beit"""
    # NOTE(review): every constructor parameter below is named `lowercase_`
    # (duplicate arguments are a SyntaxError in Python), and each
    # `UpperCAmelCase = ...` line discards a value evidently meant for the
    # `self.*` attribute named on its right-hand side; the identifiers appear
    # mechanically mangled -- restore before use.
    def __init__( self :List[str] , lowercase_ :List[Any]=81_92 , lowercase_ :str=7_68 , lowercase_ :List[str]=12 , lowercase_ :Optional[int]=12 , lowercase_ :Dict=30_72 , lowercase_ :Tuple="gelu" , lowercase_ :Any=0.0 , lowercase_ :Optional[int]=0.0 , lowercase_ :Dict=0.02 , lowercase_ :int=1E-12 , lowercase_ :List[Any]=2_24 , lowercase_ :Dict=16 , lowercase_ :List[Any]=3 , lowercase_ :List[str]=False , lowercase_ :Optional[Any]=False , lowercase_ :Optional[Any]=False , lowercase_ :Optional[Any]=False , lowercase_ :Union[str, Any]=0.1 , lowercase_ :str=0.1 , lowercase_ :str=True , lowercase_ :List[str]=[3, 5, 7, 11] , lowercase_ :Optional[int]=[1, 2, 3, 6] , lowercase_ :str=True , lowercase_ :int=0.4 , lowercase_ :Union[str, Any]=2_56 , lowercase_ :int=1 , lowercase_ :Tuple=False , lowercase_ :Optional[int]=2_55 , **lowercase_ :str , ) -> Any:
        """Store the hyper-parameters; extra kwargs go to the base config."""
        super().__init__(**lowercase_ )
        UpperCAmelCase = vocab_size
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = initializer_range
        UpperCAmelCase = layer_norm_eps
        UpperCAmelCase = image_size
        UpperCAmelCase = patch_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = use_mask_token
        UpperCAmelCase = use_absolute_position_embeddings
        UpperCAmelCase = use_relative_position_bias
        UpperCAmelCase = use_shared_relative_position_bias
        UpperCAmelCase = layer_scale_init_value
        UpperCAmelCase = drop_path_rate
        UpperCAmelCase = use_mean_pooling
        # decode head attributes (semantic segmentation)
        UpperCAmelCase = out_indices
        UpperCAmelCase = pool_scales
        # auxiliary head attributes (semantic segmentation)
        UpperCAmelCase = use_auxiliary_head
        UpperCAmelCase = auxiliary_loss_weight
        UpperCAmelCase = auxiliary_channels
        UpperCAmelCase = auxiliary_num_convs
        UpperCAmelCase = auxiliary_concat_input
        UpperCAmelCase = semantic_loss_ignore_index
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for the vision model above."""
    # Minimum tooling version this export config targets.
    __UpperCamelCase = version.parse("""1.11""" )
    # NOTE(review): both properties below are named `UpperCAmelCase__`, so the
    # second definition shadows the first -- evidently `inputs` and
    # `atol_for_validation` before mechanical renaming.
    @property
    def UpperCAmelCase__ ( self :Dict ) -> Mapping[str, Mapping[int, str]]:
        """ONNX input spec: NCHW `pixel_values` with named dynamic axes."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def UpperCAmelCase__ ( self :Tuple ) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1E-4
| 78 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): all four module constants below are assigned to the same
# throwaway name `snake_case_` -- evidently the label folder, image folder,
# output folder and flip type before mechanical renaming; the driver reads
# them under their original names. Restore before running.
snake_case_ = """"""
snake_case_ = """"""
snake_case_ = """"""
snake_case_ = 1 # (0 is vertical, 1 is horizontal)
def _lowerCAmelCase ( ):
    """Flip every dataset image (per the module flip type), then write the
    flipped image and its adjusted YOLO annotations with a random suffix."""
    # NOTE(review): this driver calls `get_dataset`, `update_image_and_anno`
    # and `random_chars` and reads `OUTPUT_DIR`, yet every result is assigned
    # to the throwaway name `UpperCAmelCase` and the local names it reads
    # (`paths`, `new_annos`, `file_name`, `file_root`, ...) are never bound;
    # the identifiers appear mechanically mangled -- restore before running.
    UpperCAmelCase , UpperCAmelCase = get_dataset(lowercase_ , lowercase_ )
    print('Processing...' )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = update_image_and_anno(lowercase_ , lowercase_ , lowercase_ )
    for index, image in enumerate(lowercase_ ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        UpperCAmelCase = random_chars(32 )
        UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
        UpperCAmelCase = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(F"""/{file_root}.jpg""" , lowercase_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F"""Success {index+1}/{len(lowercase_ )} with {file_name}""" )
        UpperCAmelCase = []
        for anno in new_annos[index]:
            UpperCAmelCase = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(lowercase_ )
        with open(F"""/{file_root}.txt""" , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = []
UpperCAmelCase = []
for label_file in glob.glob(os.path.join(lowercase_ , '*.txt' ) ):
UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(lowercase_ ) as in_file:
UpperCAmelCase = in_file.readlines()
UpperCAmelCase = os.path.join(lowercase_ , F"""{label_name}.jpg""" )
UpperCAmelCase = []
for obj_list in obj_lists:
UpperCAmelCase = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(lowercase_ )
labels.append(lowercase_ )
return img_paths, labels
def _lowerCAmelCase ( img_list , anno_list , flip_type = 1 ):
    """Flip each image horizontally (flip_type=1) or vertically (flip_type=0)
    and mirror its YOLO box centers accordingly.

    Returns (new_imgs_list, new_annos_lists, path_list). Relies on the
    module-level ``cva`` (OpenCV) import. Fixes the original's duplicate
    parameter names (a SyntaxError) and mangled local assignments.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            # Horizontal flip: mirror the x-center of every box.
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            # Vertical flip: mirror the y-center of every box.
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def _lowerCAmelCase ( lowercase_ = 32 ):
assert number_char > 1, "The number of character should greater than 1"
UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(lowercase_ ) for _ in range(lowercase_ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 78 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Package __init__ for the LongT5 model family: declare an import structure up
# front and defer the heavy imports to _LazyModule at attribute-access time.
# NOTE(review): module-level names were mechanically mangled -- `snake_case_`
# is reassigned repeatedly where the upstream file builds a single
# `_import_structure` dict (referenced at the bottom but never defined here),
# and the TYPE_CHECKING imports target `.configuration_longta` /
# `.modeling_longta` although the real module names use `longt5`. Restore
# before use.
snake_case_ = {
    """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not advertise the PyTorch classes.
    pass
else:
    snake_case_ = [
        """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LongT5EncoderModel""",
        """LongT5ForConditionalGeneration""",
        """LongT5Model""",
        """LongT5PreTrainedModel""",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ = [
        """FlaxLongT5ForConditionalGeneration""",
        """FlaxLongT5Model""",
        """FlaxLongT5PreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longta import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongTaEncoderModel,
            LongTaForConditionalGeneration,
            LongTaModel,
            LongTaPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longta import (
            FlaxLongTaForConditionalGeneration,
            FlaxLongTaModel,
            FlaxLongTaPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ = 1000 ):
UpperCAmelCase , UpperCAmelCase = 1, 1
UpperCAmelCase = 2
while True:
UpperCAmelCase = 0
UpperCAmelCase = fa + fa
UpperCAmelCase , UpperCAmelCase = fa, f
index += 1
for _ in str(lowercase_ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 78 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(R'^(.*)_\d+\.jpg$' , lowercase_ ).groups()[0]
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Dataset of pet images: opens each file as RGB, applies an optional
    transform, and maps the filename-derived label through ``label_to_id``.

    Rewritten because the original used duplicate parameter names (a
    SyntaxError) and discarded every attribute/local assignment. Relies on
    the module-level ``PIL`` import and ``extract_label`` helper.
    """

    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__( self ):
        return len(self.file_names )

    def __getitem__( self , idx ):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('RGB' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def _lowerCAmelCase ( config , args ):
    """Train and evaluate a fine-tuned ResNet on a pets dataset with Accelerate.

    `config` holds the hyper-parameters (lr, num_epochs, seed, batch_size,
    image_size); `args` is the parsed CLI namespace. The original signature
    duplicated one parameter name (SyntaxError) and every local binding was
    discarded into a single mangled name; reconstructed from the visible flow.
    """
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='all', project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    image_size = config['image_size']
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, 'isdigit'):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."""
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split('.')[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith('.jpg')]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model('resnet50d', pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg['mean'])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg['std'])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    resume_step = None
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace('epoch_', '')) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_', ''))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch['label'])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"""step_{overall_step}"""
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['label']))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""")
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': 100 * eval_metric,
                    'train_loss': total_loss.item() / len(train_dataloader),
                    'epoch': epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"""epoch_{epoch}"""
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def _lowerCAmelCase ( ):
    """Parse CLI arguments and launch training.

    The original bound the parser and the parsed namespace to throwaway
    names, leaving `parser` and `args` undefined.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument('--data_dir', required=True, help='The data folder on disk.')
    parser.add_argument('--fp16', action='store_true', help='If passed, will use FP16 training.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    parser.add_argument(
        '--checkpointing_steps', type=str, default=None, help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.', )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.', )
    parser.add_argument(
        '--with_tracking', action='store_true', help='Whether to load in all available experiment trackers from the environment and use them for logging.', )
    parser.add_argument(
        '--project_dir', type=str, default='logs', help='Location on where to store experiment tracking logs` and relevent project information', )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config, args)
if __name__ == "__main__":
    # The obfuscated script called an undefined `main`; the argument-parsing
    # entry point directly above is defined as `_lowerCAmelCase`.
    _lowerCAmelCase()
| 78 | 1 |
"""simple docstring"""
import argparse
import json
import subprocess
def _lowerCAmelCase ( target_runners , token ):
    """Query GitHub Actions for self-hosted runners and fail on offline ones.

    Writes the offline list to ``offline_runners.txt`` (for Slack reporting)
    and raises ValueError if any runner in `target_runners` is offline.
    The original duplicated both parameter names (SyntaxError) and discarded
    every local binding.
    """
    offline_runners = []
    cmd = (
        f"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    stdout_text = output.stdout.decode('utf-8')
    status = json.loads(stdout_text)
    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w') as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(f"""The following runners are offline:\n{failed}""")
if __name__ == "__main__":
    # Parse the comma-separated runner list into a Python list.
    def list_str(values):
        return values.split(',')

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--target_runners""",
        default=None,
        type=list_str,
        required=True,
        help="""Comma-separated list of runners to check status.""",
    )
    parser.add_argument(
        """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
    )
    args = parser.parse_args()
    # The status-checking function above is defined as `_lowerCAmelCase`.
    _lowerCAmelCase(args.target_runners, args.token)
| 78 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = list(range(len(lowercase_ ) ) )
UpperCAmelCase = [v / w for v, w in zip(lowercase_ , lowercase_ )]
index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ )
UpperCAmelCase = 0
UpperCAmelCase = [0] * len(lowercase_ )
for i in index:
if weight[i] <= capacity:
UpperCAmelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
    # Execute any doctests in this module when run as a script.
    import doctest
    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ ):
if len(lowercase_ ) <= 1:
return [tuple(lowercase_ )]
UpperCAmelCase = []
def generate(lowercase_ , lowercase_ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowercase_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
UpperCAmelCase , UpperCAmelCase = arr[k - 1], arr[i]
else: # k is odd
UpperCAmelCase , UpperCAmelCase = arr[k - 1], arr[0]
generate(k - 1 , lowercase_ )
generate(len(lowercase_ ) , lowercase_ )
return res
if __name__ == "__main__":
    # The original bound both inputs to a throwaway name and then read
    # `user_input`/`arr`, and called an undefined `heaps`.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    print(_lowerCAmelCase(arr))
| 78 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Visual question answering pipeline: answer a free-text question about an image.

    The original collapsed all four pipeline hooks onto a single method name
    (so only the last survived) and declared ``*args``/``**kwargs`` under the
    same identifier (SyntaxError). Restored to the standard Pipeline protocol
    (_sanitize_parameters / preprocess / _forward / postprocess).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict usage to models registered for visual question answering.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Split caller kwargs into preprocess / forward / postprocess dicts."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        """Accept (image, question) or pre-packed {'image': ..., 'question': ...} inputs."""
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'image': image, 'question': question}
        else:
            # Caller supplied an already-structured input (dict or list of dicts).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs['image'])
        model_inputs = self.tokenizer(
            inputs['question'], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            # VQA heads are multi-label: use sigmoid rather than softmax.
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        # Digit-mangled `idalabel` restored to the standard `id2label` mapping.
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class A_ :
    """Helper that builds tiny Blenderbot configs/inputs and checks decoder caching.

    The original declared three class attributes under one name, duplicated
    every ``__init__`` parameter (SyntaxError), and never assigned to ``self``.
    """

    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        # Force an EOS token at the end of every input sequence.
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3)
def _lowerCAmelCase ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Build the full Blenderbot input dict, deriving any mask not supplied.

    The original duplicated several parameter names (SyntaxError), discarded
    every derived mask, and used the digit-mangled dtype `tf.inta` (int8).
    """
    if attention_mask is None:
        # 1 everywhere except padding positions.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the decoder-start token, then mask padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Common model tests for TF Blenderbot.

    The original bound the tester/config-tester to throwaway locals instead of
    ``self`` and collapsed all three test methods onto one shadowed name.
    """

    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFBlenderbotForConditionalGeneration,
            """feature-extraction""": TFBlenderbotModel,
            """summarization""": TFBlenderbotForConditionalGeneration,
            """text2text-generation""": TFBlenderbotForConditionalGeneration,
            """translation""": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test generating from the 400M distilled Blenderbot.

    The original declared both class attributes under a single shadowed name
    while the body reads ``self.src_text``/``self.model_name``, and likewise
    collapsed the two cached properties the body accesses as
    ``self.tokenizer``/``self.model``.
    """

    src_text = ["""My friends are cool but they eat too many carbs."""]
    model_name = """facebook/blenderbot-400M-distill"""

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 78 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration class for Transformer-XL models.

    The original declared all three class attributes under one shadowed name,
    duplicated every ``__init__`` parameter (SyntaxError), never stored the
    values on ``self``, and attached ``@max_position_embeddings.setter`` to a
    property that was never defined under that name.
    """

    model_type = """transfo-xl"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=26_77_35,
        cutoffs=[2_00_00, 4_00_00, 20_00_00],
        d_model=10_24,
        d_embed=10_24,
        n_head=16,
        d_head=64,
        d_inner=40_96,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=16_00,
        clamp_len=10_00,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1E-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # The first projection is never tied; the rest follow the share flag.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
| 78 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration class for RWKV models.

    The original declared both class attributes under one shadowed name,
    duplicated ``__init__`` parameter names (SyntaxError) and never stored
    values on ``self``.
    """

    model_type = """rwkv"""
    attribute_map = {"""max_position_embeddings""": """context_length"""}

    def __init__(
        self,
        vocab_size=5_02_77,
        context_length=10_24,
        hidden_size=40_96,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fall back to the model width / 4x width when sizes are not given.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 78 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowerCAmelCase ( lowercase_ = "isbn/0140328726" ):
UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
UpperCAmelCase = F"""{olid} is not a valid Open Library olid"""
raise ValueError(lowercase_ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
UpperCAmelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
UpperCAmelCase = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
UpperCAmelCase = data['First sentence']['value']
for key, value in data.items():
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = ', '.join(lowercase_ )
return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive loop: look up ISBNs until the user quits. The original
    # bound both input and result to a throwaway name and then read the
    # undefined `isbn`/`book_summary`.
    # NOTE(review): `summarize_book`/`get_openlibrary_data` are not bound
    # under those names in this file as shown (both are `_lowerCAmelCase`) —
    # confirm the intended function names.
    while True:
        isbn = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
            print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 78 | 1 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def _lowerCAmelCase ( lowercase_ ):
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
UpperCAmelCase = precision
UpperCAmelCase = ceil(precision / 14 )
UpperCAmelCase = 426880 * Decimal(10005 ).sqrt()
UpperCAmelCase = 1
UpperCAmelCase = 13591409
UpperCAmelCase = Decimal(lowercase_ )
for k in range(1 , lowercase_ ):
UpperCAmelCase = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase_ ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    # The original assigned the digit count to a throwaway name and then
    # read the undefined `n` and called an undefined `pi`.
    n = 50
    print(f'''The first {n} digits of pi is: {_lowerCAmelCase(n)}''')
| 78 |
"""simple docstring"""
class A_ :
    """Node of a doubly linked list: holds a value plus previous/next links.

    The original ``__init__`` duplicated two parameter names (SyntaxError)
    and never assigned to ``self``; the three accessors all shared one name.
    """

    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"""{self.data}"""

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class A_ :
    """Forward iterator over a linked list, starting at the given head node.

    The original never stored the head on ``self`` and named the advance
    method something other than ``__next__``, so iteration could not work.
    """

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class A_ :
"""simple docstring"""
def __init__( self :Union[str, Any] ) -> List[Any]:
UpperCAmelCase = None # First node in list
UpperCAmelCase = None # Last node in list
def __str__( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase = self.head
UpperCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
UpperCAmelCase = current.get_next()
return " ".join(str(lowercase_ ) for node in nodes )
def __contains__( self :str , lowercase_ :int ) -> str:
UpperCAmelCase = self.head
while current:
if current.get_data() == value:
return True
UpperCAmelCase = current.get_next()
return False
def __iter__( self :Tuple ) -> Dict:
return LinkedListIterator(self.head )
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
if self.head:
return self.head.get_data()
return None
def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]:
if self.tail:
return self.tail.get_data()
return None
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node ) -> None:
if self.head is None:
UpperCAmelCase = node
UpperCAmelCase = node
else:
self.insert_before_node(self.head , lowercase_ )
def UpperCAmelCase__ ( self :Any , lowercase_ :Node ) -> None:
if self.head is None:
self.set_head(lowercase_ )
else:
self.insert_after_node(self.tail , lowercase_ )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :int ) -> None:
UpperCAmelCase = Node(lowercase_ )
if self.head is None:
self.set_head(lowercase_ )
else:
self.set_tail(lowercase_ )
def UpperCAmelCase__ ( self :int , lowercase_ :Node , lowercase_ :Node ) -> None:
UpperCAmelCase = node
UpperCAmelCase = node.previous
if node.get_previous() is None:
UpperCAmelCase = node_to_insert
else:
UpperCAmelCase = node_to_insert
UpperCAmelCase = node_to_insert
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node , lowercase_ :Node ) -> None:
UpperCAmelCase = node
UpperCAmelCase = node.next
if node.get_next() is None:
UpperCAmelCase = node_to_insert
else:
UpperCAmelCase = node_to_insert
UpperCAmelCase = node_to_insert
def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None:
UpperCAmelCase = 1
UpperCAmelCase = Node(lowercase_ )
UpperCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase_ , lowercase_ )
return
current_position += 1
UpperCAmelCase = node.next
self.insert_after_node(self.tail , lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int ) -> Node:
UpperCAmelCase = self.head
while node:
if node.get_data() == item:
return node
UpperCAmelCase = node.get_next()
raise Exception('Node not found' )
def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[Any] ) -> Dict:
if (node := self.get_node(lowercase_ )) is not None:
if node == self.head:
UpperCAmelCase = self.head.get_next()
if node == self.tail:
UpperCAmelCase = self.tail.get_previous()
self.remove_node_pointers(lowercase_ )
@staticmethod
def UpperCAmelCase__ ( lowercase_ :Node ) -> None:
if node.get_next():
UpperCAmelCase = node.previous
if node.get_previous():
UpperCAmelCase = node.next
UpperCAmelCase = None
UpperCAmelCase = None
def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]:
return self.head is None
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
    # Run this module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger used below by convert_glpn_checkpoint().
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Map original GLPN checkpoint keys onto HF ``GLPNForDepthEstimation`` names.

    The replacement order matters: prefix renames run first and each
    substring rename feeds the next check.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(idx)-1}""")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(idx)-1}""")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f"""block{idx}""", f"""block.{int(idx)-1}""")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(idx)-1}""")
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value ("kv") projection in ``state_dict`` into
    separate key and value tensors, in place.

    The first ``hidden_sizes[i]`` rows hold the key projection; the rest are
    the value projection.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""")
            kv_bias = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO cats test image used to verify the conversion."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint into a HF ``GLPNForDepthEstimation``
    model, sanity-check its output on a test image, and optionally push it
    (plus the image processor) to the hub.
    """
    # GLPN KITTI/NYU checkpoints share this encoder configuration.
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info('Converting model...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output against known slices of the reference checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"""Unknown model name: {model_name}""")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')

    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        """--checkpoint_path""",
        default=None,
        type=str,
        help="""Path to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
    )
    parser.add_argument(
        """--model_name""",
        default="""glpn-kitti""",
        type=str,
        help="""Name of the model in case you're pushing to the hub.""",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 78 |
"""simple docstring"""
class MaxFenwickTree:
    """Fenwick (binary indexed) tree supporting point updates and range
    maximum queries in O(log^2 n).

    ``arr`` keeps the raw values; ``tree[i]`` stores the maximum over the
    inclusive range ``[get_prev(i) + 1, i]``.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        # Next index whose covering range includes ``index``.
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        # Last index *before* the range covered by ``index``.
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set ``arr[index] = value`` and repair every tree node covering it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # This node covers only ``index`` itself.
                self.tree[index] = value
            else:
                # Recompute the maximum over the node's whole range; updates
                # propagate upward, so lower tree nodes are already repaired.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return the maximum over the half-open range ``[left, right)``."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    # Run this module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    """Configuration for an EfficientNet model; defaults reproduce efficientnet-b7."""

    model_type = 'efficientnet'

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        # NOTE(review): mutable list defaults are shared across instances;
        # kept for interface compatibility with the upstream config.
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = 'swish',
        hidden_dim: int = 2560,
        pooling_type: str = 'mean',
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands to four hidden layers in this implementation.
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet."""

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Single pixel_values input with named dynamic axes."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating ONNX outputs against PyTorch.
        return 1e-5
| 78 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built with the ``tokenizers``
    library: NMT/NFKC normalization, lowercasing, whitespace collapsing,
    Metaspace/digit/punctuation pre-tokenization and an EOS post-processor.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }

        # Special tokens ordered by their fixed ids.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}'), ' '),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        # Always append EOS after the single sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"""$A {self.special_tokens['eos']['token']}""", special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])], )

        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model on one or more text files, then register unk_id."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model from an iterator of texts, then register unk_id."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        """Patch the serialized model so the unk token id is recorded."""
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 78 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case_ = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    """Image processor for BLIP: optional RGB conversion, resize to a fixed
    (height, width), rescale to [0, 1] and CLIP-style normalization."""

    model_input_names = ["""pixel_values"""]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to the exact (height, width) given in ``size``."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        output_size = (size['height'], size['width'])
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> int:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline over one image or a batch; every step can be
        overridden per call and falls back to the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        # presumably matches upstream's non-square default here — TODO confirm
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        # NOTE(review): `and` binds tighter than `or`, so resample=None alone
        # triggers this error; kept as-is to preserve upstream behavior.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        return encoded_outputs
| 78 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Return a cryptographically random password of ``length`` characters
    drawn from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Build an ``i``-character password guaranteed to contain every character
    of ``chars_incl``, padded with random letters, digits and punctuation."""
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return ``i`` characters chosen securely at random from ``chars_incl``."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    """Placeholder: return ``i`` random digits drawn from ``chars_incl``."""
    pass  # Put your code here...


def random_letters(chars_incl, i):
    """Placeholder: return ``i`` random letters drawn from ``chars_incl``."""
    pass  # Put your code here...


def random_characters(chars_incl, i):
    """Placeholder: return ``i`` random special characters from ``chars_incl``."""
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """A password is strong when it is at least ``min_length`` characters long
    and mixes upper case, lower case, digits and special characters."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def main():
    """Interactively generate and print two candidate passwords."""
    length = int(input('Please indicate the max length of your password: ').strip())
    chars_incl = input(
        'Please indicate the characters that must be in your password: ').strip()
    print('Password generated:', password_generator(length))
    print(
        'Alternative Password generated:', alternative_password_generator(chars_incl, length), )
    print('[If you are thinking of using this passsword, You better save it.]')


if __name__ == "__main__":
    main()
| 78 | 1 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for ``StableUnCLIPPipeline`` built from tiny random components."""

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build the full prior + denoiser component dict from tiny models."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ))

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule='squaredcos_cap_v2', )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ))

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs; MPS needs a CPU-seeded generator."""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Only compare exact outputs on CPU where results are deterministic.
        test_max_difference = torch_device == 'cpu'

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for ``StableUnCLIPPipeline``."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        """Full fp16 generation compared against a stored reference image."""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy')

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turle', generator=generator, output_type='np')

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        """Offloaded fp16 run must stay under a 7 GB peak VRAM budget."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle', prior_num_inference_steps=2, num_inference_steps=2, output_type='np', )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 78 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    """Mixin shared by DeepFloyd-IF pipeline tests.

    Builds tiny dummy components and exercises save/load round-trips.
    Concrete test classes must provide `pipeline_class`, `get_dummy_components`
    and `get_dummy_inputs`.

    NOTE(review): all four methods were mangled to the same name
    (`UpperCAmelCase__`), clobbering each other; names restored from the
    returned dict keys and later uses.
    """

    def _get_dummy_components(self):
        """Tiny stage-1 IF components: T5 encoder/tokenizer, UNet, DDPM scheduler, watermarker."""
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 ,
            layers_per_block=1 ,
            block_out_channels=[32, 64] ,
            down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] ,
            mid_block_type='UNetMidBlock2DSimpleCrossAttn' ,
            up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] ,
            in_channels=3 ,
            out_channels=6 ,
            cross_attention_dim=32 ,
            encoder_hid_dim=32 ,
            attention_head_dim=8 ,
            addition_embed_type='text' ,
            addition_embed_type_num_heads=2 ,
            cross_attention_norm='group_norm' ,
            resnet_time_scale_shift='scale_shift' ,
            act_fn='gelu' ,
        )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 ,
            beta_schedule='squaredcos_cap_v2' ,
            beta_start=0.0001 ,
            beta_end=0.02 ,
            thresholding=True ,
            dynamic_thresholding_ratio=0.95 ,
            sample_max_value=1.0 ,
            prediction_type='epsilon' ,
            variance_type='learned_range' ,
        )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        """Tiny stage-2 (super-resolution) components; adds an image-noising scheduler."""
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 ,
            layers_per_block=[1, 2] ,
            block_out_channels=[32, 64] ,
            down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] ,
            mid_block_type='UNetMidBlock2DSimpleCrossAttn' ,
            up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] ,
            in_channels=6 ,
            out_channels=6 ,
            cross_attention_dim=32 ,
            encoder_hid_dim=32 ,
            attention_head_dim=8 ,
            addition_embed_type='text' ,
            addition_embed_type_num_heads=2 ,
            cross_attention_norm='group_norm' ,
            resnet_time_scale_shift='scale_shift' ,
            act_fn='gelu' ,
            class_embed_type='timestep' ,
            mid_block_scale_factor=1.414 ,
            time_embedding_act_fn='gelu' ,
            time_embedding_dim=32 ,
        )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 ,
            beta_schedule='squaredcos_cap_v2' ,
            beta_start=0.0001 ,
            beta_end=0.02 ,
            thresholding=True ,
            dynamic_thresholding_ratio=0.95 ,
            sample_max_value=1.0 ,
            prediction_type='epsilon' ,
            variance_type='learned_range' ,
        )
        torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000 ,
            beta_schedule='squaredcos_cap_v2' ,
            beta_start=0.0001 ,
            beta_end=0.02 ,
        )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        """Round-trip save/load with optional components set to None; outputs must match."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        if "image" in inputs:
            image = inputs['image']
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None
        prompt_embeds , negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )

    def _test_save_load_local(self):
        """Round-trip save/load to a local directory; outputs must match."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
| 78 | 1 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module-level logging state; names restored from their later uses
# (`with _lock:`, `if _default_handler:`, `log_levels[...]`, `_tqdm_active`).
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

# Verbosity-level names accepted in the TRANSFERS_VERBOSITY-style env var,
# mapped to stdlib logging levels.
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """Return the level named by the TRANSFORMERS_VERBOSITY env var, else the default."""
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                F"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level


def _get_library_name() -> str:
    """Top-level package name of this module."""
    return __name__.split('.' )[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name() )


def _configure_library_root_logger() -> None:
    """Idempotently attach a stderr handler and default level to the library root logger."""
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """Detach the default handler and reset the root logger level."""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None


def get_log_levels_dict():
    """Return the name -> logging-level mapping."""
    return log_levels


def get_logger(name: Optional[str] = None ) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )


def get_verbosity() -> int:
    """Return the current effective level of the library root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ) -> None:
    """Set the verbosity level for the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )


def set_verbosity_info():
    """Set verbosity to INFO."""
    return set_verbosity(INFO )


def set_verbosity_warning():
    """Set verbosity to WARNING."""
    return set_verbosity(WARNING )


def set_verbosity_debug():
    """Set verbosity to DEBUG."""
    return set_verbosity(DEBUG )


def set_verbosity_error():
    """Set verbosity to ERROR."""
    return set_verbosity(ERROR )


def disable_default_handler() -> None:
    """Disable the default stderr handler of the library root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )


def enable_default_handler() -> None:
    """Enable the default stderr handler of the library root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )


def add_handler(handler: logging.Handler ) -> None:
    """Add a handler to the library root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )


def remove_handler(handler: logging.Handler ) -> None:
    """Remove a previously-added handler from the library root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )


def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Apply an explicit `[LEVEL|file:line] time >> message` format to every handler."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
        handler.setFormatter(formatter )


def reset_format() -> None:
    """Reset every handler of the root logger to the default (no) formatter."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )


def warning_advice(self , *args , **kwargs ):
    """Like Logger.warning(), but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None )
def warning_once(self , *args , **kwargs ):
    """Like Logger.warning(), but each distinct (logger, message) is emitted only once."""
    self.warning(*args , **kwargs )


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm that swallows every call (used when progress bars are disabled).

    NOTE(review): class name restored from its use site (`return EmptyTqdm(...)`).
    """

    def __init__(self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self ):
        return iter(self._iterator )

    def __getattr__(self , _ ):
        """Return a no-op function for any tqdm method (update, close, ...)."""

        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self ):
        return self

    def __exit__(self , type_ , value , traceback ):
        return
class _tqdm_cls:
    """Factory/facade that yields a real tqdm bar when enabled, else an EmptyTqdm.

    NOTE(review): class name restored from its use site (`_tqdm_cls()`); the
    two helper methods mirror tqdm's `set_lock`/`get_lock` class API.
    """

    def __call__(self , *args , **kwargs ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )

    def set_lock(self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )

    def get_lock(self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# Public tqdm entry point; name restored (the mangled source bound it to `snake_case_`).
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active )


def enable_progress_bars() -> None:
    """Enable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height , width , scale_factor=8 ):
    """Round (height, width) up to the nearest multiple of scale_factor**2,
    then divide by scale_factor — the resolution passed to the latent model.

    NOTE(review): `new_height`/`new_width` were unbound in the mangled source
    (`+= 1` would raise); bindings restored from the return expression.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image , w=512 , h=512 ):
    """Resize a PIL image and convert it to a (1, 3, h, w) float32 tensor in [-1, 1].

    NOTE(review): `np.floataa` / `1_2_7.5` in the mangled source restored to
    `np.float32` / `127.5`.
    """
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('RGB' ) )
    # scale uint8 [0, 255] -> float [-1, 1]
    arr = arr.astype(np.float32 ) / 127.5 - 1
    # HWC -> CHW
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Kandinsky 2.2 image-to-image decoder pipeline (unet + DDPM scheduler + movq VQ model).

    NOTE(review): local assignment targets in this block appear machine-mangled —
    `UpperCAmelCase` is rebound repeatedly while later lines reference the
    intended names (`latents`, `noise_pred`, `timesteps`, ...), so the code as
    written raises NameError at runtime. Left byte-identical here; confirm
    against the upstream KandinskyV22Img2ImgPipeline before restoring.
    """
    def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]:
        super().__init__()
        self.register_modules(
            unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
        # movq downsampling factor: 2 per resolution level below the top one
        UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]:
        """Trim the scheduler timesteps for img2img: skip the first (1 - strength) portion."""
        # get the original timestep using init_timestep
        UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ )
        UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
        UpperCAmelCase = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any:
        """Encode `image` into movq latents and add scheduler noise at the img2img start timestep."""
        if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
        UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ )
        UpperCAmelCase = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # input already is a 4-channel latent; skip movq encoding
            UpperCAmelCase = image
        else:
            if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(lowercase_ , lowercase_ ):
                # per-sample generators: encode each image slice with its own generator
                UpperCAmelCase = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
                ]
                UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
            else:
                UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
            UpperCAmelCase = self.movq.config.scaling_factor * init_latents
        UpperCAmelCase = torch.cat([init_latents] , dim=0 )
        UpperCAmelCase = init_latents.shape
        UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
        # get latents
        UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = init_latents
        return latents
    def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]:
        """Sequentially offload unet and movq to CPU via accelerate's cpu_offload."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
        UpperCAmelCase = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(lowercase_ , lowercase_ )
    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict:
        """Model CPU offload: keep one sub-model on GPU at a time using accelerate hooks."""
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=lowercase_ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        UpperCAmelCase = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
        # We'll offload the last model manually.
        UpperCAmelCase = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
        """Device the unet actually executes on (accounts for accelerate hooks)."""
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowercase_ , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(lowercase_ )
    def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]:
        """Run img2img denoising conditioned on CLIP image embeddings; returns images."""
        UpperCAmelCase = self._execution_device
        UpperCAmelCase = guidance_scale > 1.0
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
        UpperCAmelCase = image_embeds.shape[0]
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
        if do_classifier_free_guidance:
            # duplicate embeddings and stack [negative, positive] for one batched unet pass
            UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 )
            UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
            UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
        if not isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = [image]
        if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
        UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
        UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ )
        UpperCAmelCase = self.movq.encode(lowercase_ )['latents']
        UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 )
        self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
        UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
        UpperCAmelCase = self.prepare_latents(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
        for i, t in enumerate(self.progress_bar(lowercase_ ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCAmelCase = {'image_embeds': image_embeds}
            UpperCAmelCase = self.unet(
                sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
            if do_classifier_free_guidance:
                # split predicted noise / variance, apply guidance to the noise part only
                UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
                UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
                UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
                UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase = self.scheduler.step(
                lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
        # post-processing
        UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # [-1, 1] -> [0, 1] -> NHWC numpy
            UpperCAmelCase = image * 0.5 + 0.5
            UpperCAmelCase = image.clamp(0 , 1 )
            UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCAmelCase = self.numpy_to_pil(lowercase_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowercase_ )
| 78 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class A_ ( unittest.TestCase ):
    """Smoke tests for PyTorchBenchmark: every run must yield populated time/memory results.

    NOTE(review): the mangled source named every test `UpperCAmelCase__` (so
    unittest could discover none of them) and never bound `MODEL_ID`/`benchmark`/
    `results`; names restored from their uses.
    """

    def check_results_dict_not_empty(self , results ):
        """Assert every (batch_size, sequence_length) result cell is populated."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )

    def test_inference_no_configs(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_inference_no_configs_only_pretrain(self ):
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_inference_torchscript(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , torchscript=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def test_inference_fp16(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , fp16=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_inference_no_model_no_architectures(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_train_no_configs(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
    def test_train_no_configs_fp16(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , fp16=True , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def test_inference_with_configs(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_inference_encoder_decoder_with_configs(self ):
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_train_with_configs(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def test_train_encoder_decoder_with_configs(self ):
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def test_save_csv_files(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(tmp_dir , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(tmp_dir , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(tmp_dir , 'train_time.csv' ) , env_info_csv_file=os.path.join(tmp_dir , 'env.csv' ) , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'train_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'train_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'env.csv' ) ).exists() )

    def test_trace_memory(self ):
        MODEL_ID = 'sshleifer/tiny-gpt2'

        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , 'sequential' ) )
            self.assertTrue(hasattr(summary , 'cumulative' ) )
            self.assertTrue(hasattr(summary , 'current' ) )
            self.assertTrue(hasattr(summary , 'total' ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , 'log.txt' ) , log_print=True , trace_memory_line_by_line=True , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , 'log.txt' ) ).exists() )
| 78 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float , y: float , max_step: int ) -> float:
    """Return the relative escape time of c = x + yi under the Mandelbrot iteration.

    1.0 means the point never diverged within max_step iterations (inside the
    set); values near 0 diverge immediately.
    """
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float ) -> tuple:
    """Black for points inside the Mandelbrot set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def _lowerCAmelCase(distance):
    """Colour-coded RGB for a Mandelbrot pixel.

    Points inside the set (distance == 1) are black; all others get an HSV
    hue derived from the escape distance, scaled to 0-255 per channel.

    Fixed: the parameter was mangled to ``lowercase_``; the body read the
    undefined name ``distance`` in the condition and passed the mangled
    name into ``hsv_to_rgb``. Both now use the restored parameter.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def _lowerCAmelCase(image_width=800, image_height=600, figure_center_x=-0.6, figure_center_y=0, figure_width=3.2, max_step=50, use_distance_color_coding=True):
    """Render the Mandelbrot set into a PIL RGB image and return it.

    The figure is centred on (figure_center_x, figure_center_y) with the
    given figure_width; the figure height follows from the image aspect
    ratio. Each pixel is coloured either with the HSV colour coding or in
    black and white, depending on ``use_distance_color_coding``.

    Fixed: all seven parameters were named ``lowercase_`` (a SyntaxError)
    and every local assignment targeted ``UpperCAmelCase`` while the body
    read ``img``; names are restored from those reads and from the call
    keywords in the commented-out examples below this function.

    NOTE(review): this relies on helpers named ``get_distance``,
    ``get_color_coded_rgb`` and ``get_black_and_white_rgb`` — the sibling
    definitions in this file lost those names during mangling (they are
    all ``_lowerCAmelCase``); restore them for this to run.
    """
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure
    # Fixed: the image was assigned to an unrelated mangled name while
    # ``img.show()`` was called below, guaranteeing a NameError.
    # NOTE(review): ``get_image`` is the rendering function defined above,
    # which lost its name during mangling; restore it there.
    img = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase(stress, tangential_force, area, ):
    """Solve the shear-stress relation (stress = tangential_force / area)
    for whichever quantity is unknown.

    Exactly one of the three arguments must be 0 — that one is treated as
    the unknown and a ``(name, value)`` tuple is returned for it.

    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.

    Fixed: the three parameters were all named ``lowercase_`` (a
    SyntaxError) while the body read the undefined names ``stress``,
    ``tangential_force`` and ``area``; the names are restored from those
    reads.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif stress < 0:
        raise ValueError('Stress cannot be negative')
    elif tangential_force < 0:
        raise ValueError('Tangential Force cannot be negative')
    elif area < 0:
        raise ValueError('Area cannot be negative')
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 78 |
"""simple docstring"""
import requests
snake_case_ = """""" # <-- Put your OpenWeatherMap appid here!
snake_case_ = """https://api.openweathermap.org/data/2.5/"""
def _lowerCAmelCase(q="Chicago", appid=APPID):
    """Return the current weather for the place name ``q`` as decoded JSON.

    The parameter names are the API contract: ``locals()`` is sent verbatim
    as the query string, and OpenWeatherMap's /weather endpoint expects
    ``q`` (place name) and ``appid`` (API key).

    Fixed: both parameters were named ``lowercase_`` — a SyntaxError, and,
    via ``locals()``, a broken query string; the names are restored from
    the endpoint's documented parameters.
    """
    return requests.get(URL_BASE + 'weather', params=locals()).json()
def _lowerCAmelCase(q="Kolkata, India", appid=APPID):
    """Return the weather forecast for the place name ``q`` as decoded JSON.

    ``locals()`` is sent verbatim as the query string, so the parameter
    names must match OpenWeatherMap's /forecast parameters ``q`` and
    ``appid``.

    Fixed: both parameters were named ``lowercase_`` — a SyntaxError, and,
    via ``locals()``, a broken query string.
    """
    return requests.get(URL_BASE + 'forecast', params=locals()).json()
def _lowerCAmelCase(lat=55.68, lon=12.57, appid=APPID):
    """Return the "one call" weather data for a latitude/longitude pair.

    ``locals()`` is sent verbatim as the query string, so the parameter
    names must match OpenWeatherMap's /onecall parameters ``lat``, ``lon``
    and ``appid``.

    Fixed: all three parameters were named ``lowercase_`` — a SyntaxError,
    and, via ``locals()``, a broken query string.
    """
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
if __name__ == "__main__":
    from pprint import pprint

    # Fixed: the input was assigned to an unrelated mangled name while
    # ``location`` was tested below, guaranteeing a NameError on the first
    # iteration.
    while True:
        location = input("""Enter a location:""").strip()
        if location:
            # NOTE(review): ``current_weather`` is the first function
            # defined above, which lost its name during mangling; restore
            # it there.
            pprint(current_weather(location))
        else:
            break
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase(nums, target):
    """Classic two-pointer pair search on a SORTED list.

    Returns the pair of indices ``[i, j]`` (i < j) whose values sum to
    ``target``, or ``[]`` when no such pair exists. Requires ``nums`` to be
    sorted ascending for the pointer moves to be valid.

    Fixed: both parameters were named ``lowercase_`` (a SyntaxError) while
    the body read the undefined names ``nums`` and ``target``; the names
    are restored from those reads.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            # sum too small: only moving the left pointer right can help
            i = i + 1
        else:
            # sum too large: only moving the right pointer left can help
            j = j - 1
    return []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixed: the demo referenced the undefined name ``two_pointer`` — the
    # function above lost that name during mangling, so call it under its
    # current name.
    print(f'''{_lowerCAmelCase([2, 7, 11, 15], 9) = }''')
| 78 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Processor that couples a LayoutLMv2 image processor with a LayoutXLM
    tokenizer into one callable producing model-ready encodings.

    NOTE(review): identifiers in this file are mangled — the three class
    attributes below all share one name (only the last assignment
    survives; presumably ``attributes`` / ``image_processor_class`` /
    ``tokenizer_class``), several signatures repeat ``lowercase_`` as a
    parameter name (a SyntaxError), and assignment targets are all
    ``UpperCAmelCase`` while later reads use the presumed original names.
    """

    __UpperCamelCase = ["""image_processor""", """tokenizer"""]
    __UpperCamelCase = """LayoutLMv2ImageProcessor"""
    __UpperCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")

    def __init__( self :Any , lowercase_ :int=None , lowercase_ :Union[str, Any]=None , **lowercase_ :Optional[Any] ) -> Dict:
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, warning the caller.
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , lowercase_ , )
            UpperCAmelCase = kwargs.pop('feature_extractor' )
        UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(lowercase_ , lowercase_ )

    def __call__( self :str , lowercase_ :Optional[int] , lowercase_ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ :Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ :Optional[Union[List[int], List[List[int]]]] = None , lowercase_ :bool = True , lowercase_ :Union[bool, str, PaddingStrategy] = False , lowercase_ :Union[bool, str, TruncationStrategy] = None , lowercase_ :Optional[int] = None , lowercase_ :int = 0 , lowercase_ :Optional[int] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = True , lowercase_ :Optional[Union[str, TensorType]] = None , **lowercase_ :Any , ) -> BatchEncoding:
        # verify input: user-supplied boxes/word labels conflict with the
        # image processor's own OCR output.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
        # first, apply the image processor
        UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
        # second, apply the tokenizer; with OCR enabled the recognised
        # words/boxes come from the image-processor features.
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(lowercase_ , lowercase_ ):
                UpperCAmelCase = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            UpperCAmelCase = features['words']
        UpperCAmelCase = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # add pixel values, duplicating images per overflow sample if needed
        UpperCAmelCase = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            UpperCAmelCase = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] )
        UpperCAmelCase = images
        return encoded_inputs

    def UpperCAmelCase__ ( self :Dict , lowercase_ :List[Any] , lowercase_ :Any ) -> Optional[Any]:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        UpperCAmelCase = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(lowercase_ ) != len(lowercase_ ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f""" {len(lowercase_ )} and {len(lowercase_ )}""" )
        return images_with_overflow

    def UpperCAmelCase__ ( self :Any , *lowercase_ :int , **lowercase_ :Tuple ) -> Tuple:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )

    def UpperCAmelCase__ ( self :Any , *lowercase_ :List[Any] , **lowercase_ :Optional[int] ) -> Optional[Any]:
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )

    @property
    def UpperCAmelCase__ ( self :int ) -> Optional[int]:
        # The model input names this processor produces.
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def UpperCAmelCase__ ( self :int ) -> Dict:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
        return self.image_processor_class

    @property
    def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
        return self.image_processor
| 78 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger()
@dataclass
class A_ :
    """Traces which leaf modules a model executes during one forward pass.

    Usage: ``A_(model)(x)``; afterwards ``parametrized`` returns the traced
    leaf modules that carry learnable parameters.

    Fixed: the field declarations were mangled to one repeated class
    attribute with an undefined ``default_factory``; the names are restored
    from the ``self.module`` / ``self.traced`` / ``self.handles`` reads in
    the methods below. The hook method name is restored from the
    ``self._forward_hook`` reference in ``__call__``.
    """

    module: nn.Module
    # leaf modules recorded by the forward hook during the pass
    traced: List[nn.Module] = field(default_factory=list)
    # hook handles, removed again once the pass finishes
    handles: list = field(default_factory=list)

    def _forward_hook( self :str , m :Any , inputs :Tensor , outputs :Tensor ) -> Union[str, Any]:
        # Record only leaves: modules with no submodules, plus Conv/BatchNorm
        # layers (restored from the nonexistent ``nn.Convad``/``nn.BatchNormad``).
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )

    def __call__( self :Dict , x :Tensor ) -> List[Any]:
        # Hook every submodule, run one forward pass, then detach the hooks.
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized( self :Optional[int] ) -> Any:
        # check the len of the state_dict keys to see if we have learnable params
        # (fixed: the filter lambda previously ignored its parameter and read
        # an undefined free variable)
        return list(filter(lambda m : len(list(m.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
    """Transfers weights from ``src`` to ``dest`` by tracing both models on
    the same input and zipping their parametrized leaf modules together.

    Fixed: the field declarations were mangled to one repeated class
    attribute with an undefined ``default_factory``; names and defaults are
    restored from the ``self.src`` / ``self.dest`` / ``self.verbose`` /
    ``self.src_skip`` / ``self.dest_skip`` / ``self.raise_if_mismatch``
    reads in ``__call__`` and the original attribute order.

    NOTE(review): ``Tracker`` is the tracing dataclass defined just above
    in this file (also mangled to ``A_`` there); restore its name for the
    calls below to resolve.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List[Any] = field(default_factory=list)
    dest_skip: List[Any] = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__( self :Tuple , x :Tensor ) -> Dict:
        # Trace both models on the same input so their leaf operations line up.
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        # Drop module types the caller asked to skip on either side.
        src_traced = list(filter(lambda t : type(t ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda t : type(t ) not in self.dest_skip , dest_traced ) )
        if len(src_traced ) != len(dest_traced ) and self.raise_if_mismatch:
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced )} operations while"""
                f""" destination module has {len(dest_traced )}.""" )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""" )
class A_ ( nn.Module ):
    """Wraps a classy-vision RegNet so calling it yields the trunk feature
    maps via vissl's ``get_trunk_forward_outputs``.

    NOTE(review): identifiers are mangled — ``UpperCAmelCase`` targets hide
    the real locals (``feature_blocks``, ``block_index``,
    ``self._feature_blocks``), the constructor parameter is presumably
    ``model`` (read below), and the forward method lost its ``forward``
    name, so ``nn.Module.__call__`` cannot dispatch to it as written.
    """

    def __init__( self :List[Any] , lowercase_ :nn.Module ) -> Union[str, Any]:
        super().__init__()
        UpperCAmelCase = []
        # - get the stem
        feature_blocks.append(('conv1', model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('block' ), f"""Unexpected layer name {k}"""
            UpperCAmelCase = len(lowercase_ ) + 1
            feature_blocks.append((f"""res{block_index}""", v) )
        UpperCAmelCase = nn.ModuleDict(lowercase_ )

    def UpperCAmelCase__ ( self :str , lowercase_ :Tensor ) -> Tuple:
        # presumably out_feat_keys should be None (return all stages) — the
        # mangled name passed here would shadow the input tensor; verify.
        return get_trunk_forward_outputs(
            lowercase_ , out_feat_keys=lowercase_ , feature_blocks=self._feature_blocks , )
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Dict mapping a checkpoint name to a factory returning the source
    (pretrained) model; names missing from the dict fall back to a timm
    model looked up under the translated timm naming scheme.

    NOTE(review): identifiers are mangled — locals are assigned to
    ``UpperCAmelCase`` but read back as ``x_split`` / ``val``, the first
    method is referenced below as ``convert_name_to_timm`` but lost that
    name, and ``pretrained=lowercase_`` presumably carried a boolean —
    verify against the original conversion script.
    """

    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :str ) -> str:
        # Translate e.g. "regnet-y-032" into timm's "regnety_032" naming.
        UpperCAmelCase = x.split('-' )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )

    def __getitem__( self :Optional[int] , lowercase_ :str ) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            UpperCAmelCase = self.convert_name_to_timm(lowercase_ )
            UpperCAmelCase = partial(lambda: (timm.create_model(lowercase_ , pretrained=lowercase_ ).eval(), None) )
        else:
            UpperCAmelCase = super().__getitem__(lowercase_ )
        return val
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Dict mapping a checkpoint name to the Transformers class to build:
    SEER checkpoints without an ImageNet-1k head get the bare
    ``RegNetModel``; everything else gets ``RegNetForImageClassification``.
    """

    def __getitem__( self :Tuple , x :str ) -> Callable[[], nn.Module]:
        # Fixed: both results were assigned to a mangled throwaway name
        # while the undefined ``val`` was returned (NameError); the
        # parameter name ``x`` is restored from its reads in the condition.
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def _lowerCAmelCase(from_state_dict, to_state_dict, keys):
    """Copy selected tensors from one state dict into another.

    ``keys`` is an iterable of ``(from_key, to_key)`` pairs; each source
    tensor is cloned into ``to_state_dict`` under its destination key.
    Returns the (mutated) destination dict.

    Fixed: the three parameters were all named ``lowercase_`` (a
    SyntaxError) while the body read ``from_state_dict`` and returned
    ``to_state_dict``; the cloned tensor was also assigned to a mangled
    throwaway name instead of ``to_state_dict[to_key]``, so nothing was
    actually copied.
    """
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F"""Copied key={from_key} to={to_key}""")
    return to_state_dict
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ):
    """Convert one checkpoint: build the source and destination models,
    transfer weights between them, verify their outputs match, and
    optionally push the model and an image processor to the Hub.

    NOTE(review): identifiers are mangled throughout — every parameter is
    ``lowercase_`` (a SyntaxError when repeated) and every assignment
    target is ``UpperCAmelCase``, so the names read below (``name``,
    ``from_model_func``, ``our_model``, ``module_transfer``, ``from_output``
    etc.) are the presumed originals. Indentation is reconstructed.
    """
    print(F"""Converting {name}...""" )
    with torch.no_grad():
        # Build the source model (plus optional vissl state dict) and ours.
        UpperCAmelCase , UpperCAmelCase = from_model_func()
        UpperCAmelCase = our_model_func(lowercase_ ).eval()
        UpperCAmelCase = ModuleTransfer(src=lowercase_ , dest=lowercase_ , raise_if_mismatch=lowercase_ )
        UpperCAmelCase = torch.randn((1, 3, 224, 224) )
        module_transfer(lowercase_ )
        if from_state_dict is not None:
            UpperCAmelCase = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                UpperCAmelCase = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
            UpperCAmelCase = manually_copy_vissl_head(lowercase_ , our_model.state_dict() , lowercase_ )
            our_model.load_state_dict(lowercase_ )
        # Compare our model's output with the source model's.
        UpperCAmelCase = our_model(lowercase_ , output_hidden_states=lowercase_ )
        UpperCAmelCase = (
            our_outputs.logits if isinstance(lowercase_ , lowercase_ ) else our_outputs.last_hidden_state
        )
        UpperCAmelCase = from_model(lowercase_ )
        UpperCAmelCase = from_output[-1] if type(lowercase_ ) is list else from_output
        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            UpperCAmelCase = our_outputs.hidden_states[-1]
        assert torch.allclose(lowercase_ , lowercase_ ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowercase_ , )
        # SEER checkpoints were trained at 384px; everything else at 224px.
        UpperCAmelCase = 224 if 'seer' not in name else 384
        # we can use the convnext one
        UpperCAmelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowercase_ )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowercase_ , )
        print(F"""Pushed {name}""" )
def _lowerCAmelCase ( lowercase_ , lowercase_ = None , lowercase_ = True ):
    """Convert one named RegNet checkpoint (or, if no name is given, every
    known checkpoint) to the Transformers format and optionally push it.

    Builds the ImageNet-1k label maps, the per-checkpoint config table, and
    the source-model factories (timm for regular checkpoints, classy-vision
    downloads for SEER), then delegates to ``convert_weight_and_push``.

    NOTE(review): identifiers are mangled — parameters are presumably
    ``(model_name, save_directory, push_to_hub)`` and the assignment
    targets hide the names read later (``num_labels``, ``idalabel``,
    ``names_to_config``, ``names_to_ours_model_map``,
    ``names_to_from_model_map``, ``expected_shape``). Indentation is
    reconstructed.
    """
    UpperCAmelCase = 'imagenet-1k-id2label.json'
    UpperCAmelCase = 1000
    UpperCAmelCase = (1, num_labels)
    UpperCAmelCase = 'huggingface/label-files'
    UpperCAmelCase = num_labels
    # Fetch the id->label map from the Hub dataset and build both directions.
    UpperCAmelCase = json.load(open(cached_download(hf_hub_url(lowercase_ , lowercase_ , repo_type='dataset' ) ) , 'r' ) )
    UpperCAmelCase = {int(lowercase_ ): v for k, v in idalabel.items()}
    UpperCAmelCase = idalabel
    UpperCAmelCase = {v: k for k, v in idalabel.items()}
    # Config factory pre-bound with the ImageNet label metadata.
    UpperCAmelCase = partial(lowercase_ , num_labels=lowercase_ , idalabel=lowercase_ , labelaid=lowercase_ )
    # Per-checkpoint architecture table (depths / widths / group widths).
    UpperCAmelCase = {
        'regnet-x-002': ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x' ),
        'regnet-x-004': ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x' ),
        'regnet-x-006': ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x' ),
        'regnet-x-008': ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x' ),
        'regnet-x-016': ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x' ),
        'regnet-x-032': ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x' ),
        'regnet-x-040': ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x' ),
        'regnet-x-064': ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x' ),
        'regnet-x-080': ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x' ),
        'regnet-x-120': ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x' ),
        'regnet-x-160': ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x' ),
        'regnet-x-320': ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x' ),
        # y variant
        'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
        'regnet-y-004': ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
        'regnet-y-006': ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
        'regnet-y-008': ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
        'regnet-y-016': ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
        'regnet-y-032': ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
        'regnet-y-040': ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
        'regnet-y-064': ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
        'regnet-y-080': ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
        'regnet-y-120': ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
        'regnet-y-160': ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
        'regnet-y-320': ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
        'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
        'regnet-y-1280-seer': RegNetConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
        'regnet-y-2560-seer': RegNetConfig(
            depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
        'regnet-y-10b-seer': ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
        # finetuned on imagenet
        'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
        'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
        'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
        'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
        'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
    }
    UpperCAmelCase = NameToOurModelFuncMap()
    UpperCAmelCase = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(lowercase_ , lowercase_ ) -> Tuple[nn.Module, Dict]:
        # Download a classy-vision checkpoint and load its trunk weights.
        UpperCAmelCase = torch.hub.load_state_dict_from_url(lowercase_ , model_dir=str(lowercase_ ) , map_location='cpu' )
        UpperCAmelCase = model_func()
        # check if we have a head, if yes add it
        UpperCAmelCase = files['classy_state_dict']['base_model']['model']
        UpperCAmelCase = model_state_dict['trunk']
        model.load_state_dict(lowercase_ )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
    # IN1K finetuned
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
    UpperCAmelCase = partial(
        lowercase_ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
    if model_name:
        # Convert just the requested checkpoint.
        convert_weight_and_push(
            lowercase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase_ , lowercase_ , )
    else:
        # Convert every known checkpoint.
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                lowercase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase_ , lowercase_ , lowercase_ , )
    return config, expected_shape
if __name__ == "__main__":
    # NOTE(review): the parser is assigned to a mangled name but used below
    # as ``parser`` — presumably its original name.
    snake_case_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
            """ currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    # NOTE(review): ``type=bool`` is an argparse pitfall — any non-empty
    # string (including "False") parses as True; a store_true flag or an
    # explicit str->bool converter would behave as intended.
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    snake_case_ = parser.parse_args()
    snake_case_ = args.pytorch_dump_folder_path
    # Create the output directory (and parents) before converting.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 78 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
    """Directed weighted graph stored as an adjacency dict
    ``{u: [[w, v], ...]}`` (edge u->v with weight w), with iterative DFS/BFS
    traversals, degree queries, topological sort and cycle detection.

    NOTE(review): identifiers are mangled — every assignment targets
    ``UpperCAmelCase`` and every parameter is ``lowercase_``, while the
    bodies read the presumed original names (``self.graph``, ``u``, ``v``,
    ``w``, ``s``, ``ss``, ``stack``, ``visited``, ``d``, ``count``,
    ``sorted_nodes``, ``parent``, ``indirect_parents``, ``len_stack``,
    ``anticipating_nodes``, ``begin``/``end`` etc.), so the class does not
    run as written. Indentation is reconstructed.
    """

    def __init__( self :Union[str, Any] ) -> str:
        # adjacency dict: vertex -> list of [weight, neighbour] pairs
        UpperCAmelCase = {}

    def UpperCAmelCase__ ( self :Any , lowercase_ :List[Any] , lowercase_ :List[str] , lowercase_ :Dict=1 ) -> List[Any]:
        # Add directed edge u -> v with weight w (default 1), avoiding
        # duplicates, and make sure v exists as a key.
        if self.graph.get(lowercase_ ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            UpperCAmelCase = [[w, v]]
        if not self.graph.get(lowercase_ ):
            UpperCAmelCase = []

    def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
        # All vertices currently known to the graph.
        return list(self.graph )

    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[Any] ) -> Dict:
        # Remove every edge u -> v.
        if self.graph.get(lowercase_ ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(lowercase_ )

    def UpperCAmelCase__ ( self :List[str] , lowercase_ :Tuple=-2 , lowercase_ :List[Any]=-1 ) -> List[Any]:
        # Iterative DFS from s (-2 = first vertex); stops early if the
        # optional destination d is reached, returning the visit order.
        if s == d:
            return []
        UpperCAmelCase = []
        UpperCAmelCase = []
        if s == -2:
            UpperCAmelCase = list(self.graph )[0]
        stack.append(lowercase_ )
        visited.append(lowercase_ )
        UpperCAmelCase = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                UpperCAmelCase = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(lowercase_ )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            UpperCAmelCase = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(lowercase_ ) != 0:
                    UpperCAmelCase = stack[len(lowercase_ ) - 1]
            else:
                UpperCAmelCase = ss
            # check if se have reached the starting point
            if len(lowercase_ ) == 0:
                return visited

    def UpperCAmelCase__ ( self :List[str] , lowercase_ :int=-1 ) -> Tuple:
        # Populate the graph with random edges; c caps the vertex ids.
        if c == -1:
            UpperCAmelCase = floor(random() * 1_00_00 ) + 10
        for i in range(lowercase_ ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02 ) + 1 ):
                UpperCAmelCase = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(lowercase_ , lowercase_ , 1 )

    def UpperCAmelCase__ ( self :Tuple , lowercase_ :Optional[Any]=-2 ) -> Optional[int]:
        # Iterative BFS from s (-2 = first vertex); returns the visit order.
        UpperCAmelCase = deque()
        UpperCAmelCase = []
        if s == -2:
            UpperCAmelCase = list(self.graph )[0]
        d.append(lowercase_ )
        visited.append(lowercase_ )
        while d:
            UpperCAmelCase = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited

    def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[int] ) -> List[Any]:
        # In-degree of u: count edges ending at u across the whole graph.
        UpperCAmelCase = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[str] ) -> List[str]:
        # Out-degree of u: number of edges leaving u.
        return len(self.graph[u] )

    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Any=-2 ) -> int:
        # DFS-based topological sort starting at s (-2 = first vertex);
        # vertices are appended as their subtrees complete.
        UpperCAmelCase = []
        UpperCAmelCase = []
        if s == -2:
            UpperCAmelCase = list(self.graph )[0]
        stack.append(lowercase_ )
        visited.append(lowercase_ )
        UpperCAmelCase = s
        UpperCAmelCase = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                UpperCAmelCase = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        UpperCAmelCase = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(lowercase_ ) != 0:
                    UpperCAmelCase = stack[len(lowercase_ ) - 1]
            else:
                UpperCAmelCase = ss
            # check if se have reached the starting point
            if len(lowercase_ ) == 0:
                return sorted_nodes

    def UpperCAmelCase__ ( self :str ) -> str:
        # Return the set of nodes that participate in a cycle, found by a
        # DFS that watches for back edges while unwinding.
        UpperCAmelCase = []
        UpperCAmelCase = []
        UpperCAmelCase = list(self.graph )[0]
        stack.append(lowercase_ )
        visited.append(lowercase_ )
        UpperCAmelCase = -2
        UpperCAmelCase = []
        UpperCAmelCase = s
        UpperCAmelCase = False
        UpperCAmelCase = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                UpperCAmelCase = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        # back edge found: collect the stack segment that
                        # forms the cycle
                        UpperCAmelCase = len(lowercase_ ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        UpperCAmelCase = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                UpperCAmelCase = True
                if len(lowercase_ ) != 0:
                    UpperCAmelCase = stack[len(lowercase_ ) - 1]
            else:
                UpperCAmelCase = False
            indirect_parents.append(lowercase_ )
            UpperCAmelCase = s
            UpperCAmelCase = ss
            # check if se have reached the starting point
            if len(lowercase_ ) == 0:
                return list(lowercase_ )

    def UpperCAmelCase__ ( self :Optional[int] ) -> Tuple:
        # Cycle predicate: same traversal as above but returns True as soon
        # as a back edge is confirmed, False if the DFS completes.
        UpperCAmelCase = []
        UpperCAmelCase = []
        UpperCAmelCase = list(self.graph )[0]
        stack.append(lowercase_ )
        visited.append(lowercase_ )
        UpperCAmelCase = -2
        UpperCAmelCase = []
        UpperCAmelCase = s
        UpperCAmelCase = False
        UpperCAmelCase = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                UpperCAmelCase = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        UpperCAmelCase = len(lowercase_ ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        UpperCAmelCase = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                UpperCAmelCase = True
                if len(lowercase_ ) != 0:
                    UpperCAmelCase = stack[len(lowercase_ ) - 1]
            else:
                UpperCAmelCase = False
            indirect_parents.append(lowercase_ )
            UpperCAmelCase = s
            UpperCAmelCase = ss
            # check if se have reached the starting point
            if len(lowercase_ ) == 0:
                return False

    def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int=-2 , lowercase_ :List[str]=-1 ) -> Any:
        # Wall-clock duration of a DFS run.
        UpperCAmelCase = time()
        self.dfs(lowercase_ , lowercase_ )
        UpperCAmelCase = time()
        return end - begin

    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[str]=-2 ) -> str:
        # Wall-clock duration of a BFS run.
        UpperCAmelCase = time()
        self.bfs(lowercase_ )
        UpperCAmelCase = time()
        return end - begin
class A_:
    """Undirected graph stored as an adjacency dict: vertex -> list of [weight, neighbour].

    Restored from an obfuscated original in which every method shared one
    identifier (later defs clobbered earlier ones), parameters were duplicated
    (SyntaxError), and local/attribute assignments went to a throwaway name.
    The method names below are the ones the surviving bodies themselves call
    (``self.add_pair``, ``self.dfs``, ``self.bfs``).
    """

    def __init__(self):
        # adjacency list: {vertex: [[weight, neighbour], ...]}
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge u<->v with weight w; duplicate edges are ignored."""
        # check if u exists
        if self.graph.get(u):
            # only add if this exact edge is not already present
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # u does not exist yet
            self.graph[u] = [[w, v]]
        # mirror the edge on v's side (undirected graph)
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge u<->v (both directions) if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search.

        Starts at ``s`` (default -2 = first vertex) and returns the list of
        visited vertices; stops early and returns as soon as destination ``d``
        is reached.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated node
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # all children visited -> backtrack
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # reached the starting point again
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` random vertices (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s`` (default: first vertex); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the vertices that participate in some cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated node
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back-edge found: everything on the stack down to
                        # node[1] is part of a cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # all children visited -> backtrack
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # reached the starting point again
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """True iff the graph contains a cycle (parent back-edges excluded)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated node
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # all children visited -> backtrack
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # reached the starting point again
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """List of all vertices (insertion order)."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by a DFS from ``s`` to ``e``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by a BFS from ``s``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 78 | 1 |
"""simple docstring"""
import requests
snake_case_ = """""" # <-- Put your OpenWeatherMap appid here!
snake_case_ = """https://api.openweathermap.org/data/2.5/"""
def _lowerCAmelCase ( lowercase_ = "Chicago" , lowercase_ = APPID ):
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def _lowerCAmelCase ( lowercase_ = "Kolkata, India" , lowercase_ = APPID ):
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def _lowerCAmelCase ( lowercase_ = 5_5.6_8 , lowercase_ = 1_2.5_7 , lowercase_ = APPID ):
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
snake_case_ = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 78 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 78 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the Speech2Text tokenizer.  Restored names: the
# obfuscated original assigned every constant to the same throwaway name while
# the tokenizer class below still reads the canonical identifiers
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, MAX_MODEL_INPUT_SIZES,
# MUSTC_LANGS, LANGUAGES).
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class A_(SCREAMING_SNAKE_CASE_):
    """Speech2Text tokenizer: SentencePiece segmentation plus an external
    vocab.json id mapping and optional target-language prefix tokens.

    Restored from an obfuscated original in which every method shared one name
    (clobbering each other), parameters were duplicated (SyntaxError), and
    attribute assignments went to a throwaway local.  The identifiers below are
    the canonical ones the bodies themselves reference.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    # tokens prepended to every encoded sequence (set per target language)
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Prefix every encoded sequence with the target-language token id."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Decode sub-tokens back to text, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is reloaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> "sentencepiece.SentencePieceProcessor":
    """Load a SentencePiece model from *path*, forwarding processor kwargs."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    """Read and parse a JSON file."""
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """Serialize *data* to *path* as indented JSON."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 78 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ):
UpperCAmelCase = []
UpperCAmelCase = 0
for index, char in enumerate(lowercase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase = index + 1
elif index + 1 == len(lowercase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """True iff num/den stays equal after naively "cancelling" the shared digit
    (num's last digit == den's first digit), and num != den."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list:
    """All non-trivial two-digit digit-cancelling fractions as 'num/den' strings."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            # den % 10 != 0 excludes the trivial cases like 30/50
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    """Project Euler 33: denominator of the product of the four curious
    fractions, given in lowest common terms.

    >>> solution()
    100
    """
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
| 78 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Dump the current git repo's id / commit sha / branch to <folder_path>/git_log.json.

    Searches parent directories for the enclosing repository, so it works when
    run from anywhere inside the checkout.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setup, mutating *params* in place.

    Populates ``local_rank``, ``global_rank``, ``world_size``, ``n_nodes``,
    ``node_id``, ``n_gpu_per_node``, ``is_master``, ``multi_gpu`` and
    ``multi_node`` from the torch.distributed environment variables, then binds
    the CUDA device and (if needed) initializes the NCCL process group.
    """
    if params.n_gpu <= 0:
        # CPU-only run: single master process, no distributed setup
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """Seed numpy and torch (and all CUDA devices, when GPUs are used) from ``args.seed``."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 78 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# Restored names: the obfuscated original assigned both the logger and the
# checkpoint map to the same throwaway identifier.
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class A_(SCREAMING_SNAKE_CASE_):
    """Configuration class for CodeGen models.

    Restored: the obfuscated original named both class attributes identically
    (so ``model_type`` was lost) and duplicated every __init__ parameter name
    (SyntaxError).  Parameter/attribute names follow the values the body
    preserves (vocab_size, n_positions, n_ctx, n_embd, ...).
    """

    model_type = "codegen"
    # maps the generic config attribute names onto GPT-style field names
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class A_(SCREAMING_SNAKE_CASE_):
    """ONNX export configuration for CodeGen (GPT-style, optional past key values).

    Restored method names (``inputs``, ``num_layers``, ``num_attention_heads``,
    ``generate_dummy_inputs``, ``default_onnx_opset``) — the obfuscated
    original gave every method the same name, so only the last survived.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # skip the direct parent's implementation on purpose (grandparent call),
        # mirroring the explicit two-argument super() in the original
        common_inputs = super(SCREAMING_SNAKE_CASE_, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 78 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
snake_case_ = """1"""
snake_case_ = """0"""
snake_case_ = """1"""
snake_case_ = ort.SessionOptions()
snake_case_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
snake_case_ = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
snake_case_ = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
snake_case_ = ort.RunOptions()
snake_case_ = 128
snake_case_ = 1
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
snake_case_ = np.ones((batch, sequence), dtype=np.intaa)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Start inference...""")
snake_case_ = time.time()
snake_case_ = 2000
snake_case_ = {}
for iter in range(max_iters):
snake_case_ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
| 78 | 1 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
snake_case_ = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
snake_case_ = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def _lowerCAmelCase ( ):
UpperCAmelCase = calculate_rouge(lowercase_ , lowercase_ , bootstrap_aggregation=lowercase_ , rouge_keys=['rouge2', 'rougeL'] )
assert isinstance(lowercase_ , lowercase_ )
UpperCAmelCase = calculate_rouge(lowercase_ , lowercase_ , bootstrap_aggregation=lowercase_ , rouge_keys=['rouge2'] )
assert (
pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
)
def _lowerCAmelCase ( ):
UpperCAmelCase = 'rougeLsum'
UpperCAmelCase = calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ , rouge_keys=[k] )[k]
UpperCAmelCase = calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _lowerCAmelCase ( ):
UpperCAmelCase = ['rouge1', 'rouge2', 'rougeL']
UpperCAmelCase = calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ , rouge_keys=lowercase_ )
UpperCAmelCase = calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ , rouge_keys=lowercase_ )
assert score_sep == score_no_sep
def _lowerCAmelCase ( ):
UpperCAmelCase = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
UpperCAmelCase = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
assert calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ ) == calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ )
def _lowerCAmelCase ( ):
UpperCAmelCase = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
UpperCAmelCase = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
UpperCAmelCase = calculate_rouge(lowercase_ , lowercase_ , rouge_keys=['rougeLsum'] , newline_sep=lowercase_ )['rougeLsum']
UpperCAmelCase = calculate_rouge(lowercase_ , lowercase_ , rouge_keys=['rougeLsum'] )['rougeLsum']
assert new_score > prev_score
def _lowerCAmelCase ( ):
UpperCAmelCase = Path('examples/seq2seq/test_data/wmt_en_ro' )
UpperCAmelCase = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
assert isinstance(lowercase_ , lowercase_ )
UpperCAmelCase = calculate_rouge_path(
data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
| 78 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case_ = logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""pixel_values"""]
def __init__( self :int , lowercase_ :bool = True , lowercase_ :Dict[str, int] = None , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :bool = True , lowercase_ :Union[int, float] = 1 / 2_55 , lowercase_ :bool = True , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :bool = True , **lowercase_ :Union[str, Any] , ) -> None:
super().__init__(**lowercase_ )
UpperCAmelCase = size if size is not None else {'height': 3_84, 'width': 3_84}
UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase = do_convert_rgb
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :np.ndarray , lowercase_ :Dict[str, int] , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Any , ) -> np.ndarray:
UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
UpperCAmelCase = (size['height'], size['width'])
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :np.ndarray , lowercase_ :Union[int, float] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[int] , ) -> int:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase__ ( self :Any , lowercase_ :np.ndarray , lowercase_ :Union[float, List[float]] , lowercase_ :Union[float, List[float]] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[Any] , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def preprocess(
    self,
    images: ImageInput,
    do_resize: Optional[bool] = None,
    size: Optional[Dict[str, int]] = None,
    resample: PILImageResampling = None,
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    do_convert_rgb: bool = None,
    data_format: ChannelDimension = ChannelDimension.FIRST,
    **kwargs,
) -> PIL.Image.Image:
    """Preprocess one image or a batch: optional RGB conversion, resize, rescale,
    normalize, then pack into a BatchFeature under ``pixel_values``.

    Every flag defaults to the value configured on the instance; explicit
    arguments override per call.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    resample = resample if resample is not None else self.resample
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=True)
    images = make_list_of_images(images)
    if not valid_images(images):
        raise ValueError(
            'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
            'torch.Tensor, tf.Tensor or jax.ndarray.')
    # BUGFIX: original read `do_resize and size is None or resample is None`,
    # which raised whenever resample was None even with do_resize=False.
    if do_resize and (size is None or resample is None):
        raise ValueError('Size and resample must be specified if do_resize is True.')
    if do_rescale and rescale_factor is None:
        raise ValueError('Rescale factor must be specified if do_rescale is True.')
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError('Image mean and std must be specified if do_normalize is True.')
    # PIL RGBA images are converted to RGB
    if do_convert_rgb:
        images = [convert_to_rgb(image) for image in images]
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]
    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
    images = [to_channel_dimension_format(image, data_format) for image in images]
    encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
    return encoded_outputs
| 78 | 1 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
snake_case_ = logging.getLogger(__name__)
def _lowerCAmelCase(folder_path):
    """Dump the current git commit/branch of the working repo to `folder_path`/git_log.json.

    Side effects only (file write); used to make distillation runs reproducible.
    """
    # BUGFIX: `search_parent_directories` takes a bool, not the output path.
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        # BUGFIX: dump the collected repo info into the file handle, not the path into itself.
        json.dump(repo_infos, f, indent=4)
def _lowerCAmelCase(params):
    """Initialize (multi-)GPU fields on `params` in place from the environment.

    Sets local_rank / global_rank / world_size / node ids plus the is_master,
    multi_gpu and multi_node flags, pins the CUDA device, and initializes the
    NCCL process group in the distributed case. BUGFIX: the mangled version
    assigned every value to a throwaway local instead of attributes of `params`.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        # Multi-GPU job: ranks and sizes come from the launcher's environment.
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID        : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank     : %i' % params.local_rank)
    logger.info(PREFIX + 'World size     : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node  : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master         : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node     : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU      : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname       : %s' % socket.gethostname())
    # set GPU device
    torch.cuda.set_device(params.local_rank)
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(
            init_method='env://', backend='nccl', )
def _lowerCAmelCase ( lowercase_ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 78 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """Configuration for BEiT-style vision models, including decode/auxiliary head
    settings used for semantic segmentation.

    BUGFIX: the mangled version duplicated `lowercase_` for every parameter
    (a SyntaxError), used an undefined base class although `PretrainedConfig`
    is imported at the top of the file, and flattened the `model_type` attribute.
    """

    model_type = """beit"""

    def __init__(
        self,
        vocab_size=81_92,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=2_24,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=2_56,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=2_55,
        **kwargs,
    ) -> Any:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for the vision model.

    BUGFIX: both properties were flattened to one name (the second shadowed the
    first, making `inputs` unreachable) and the base class was undefined even
    though `OnnxConfig` is imported at the top of the file.
    """

    # Minimum torch version known to export this graph correctly.
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Single `pixel_values` input in NCHW layout with dynamic axes."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    """Builds tiny DistilBert configs/inputs and checks each TF head's output shapes.

    BUGFIX: the mangled version duplicated `lowercase_` parameter names
    (SyntaxError) and flattened every method to one name; the sibling test class
    calls `prepare_config_and_inputs` / `create_and_check_*`, so those names are
    restored here.
    """

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a tiny DistilBertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        # Also exercise the positional (list) calling convention.
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        # Expand (batch, seq) -> (batch, num_choices, seq) by tiling.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Shape the inputs the way TFModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model/pipeline test-suite entry for TF DistilBert.

    BUGFIX: methods lacked the `test_`/`setUp` names unittest discovery requires,
    all class attributes shared one flattened name (so the mixin could not read
    them), and several `lowercase_` references were NameErrors.
    """

    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFDistilBertModel,
            """fill-mask""": TFDistilBertForMaskedLM,
            """question-answering""": TFDistilBertForQuestionAnswering,
            """text-classification""": TFDistilBertForSequenceClassification,
            """token-classification""": TFDistilBertForTokenClassification,
            """zero-shot""": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        # BUGFIX: config_class was the undefined name `lowercase_`.
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            # BUGFIX: previously passed the undefined name `lowercase_`.
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against a published checkpoint.

    BUGFIX: the method lacked a discoverable `test_` name and the model was
    called with the undefined name `lowercase_` instead of the input ids.
    """

    @slow
    def test_output_embeds_base_model(self):
        model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 7_68]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.1926_1885, -0.1373_2955, 0.411_9799],
                    [0.2215_0156, -0.0742_2661, 0.3903_7204],
                    [0.2275_6018, -0.089_6414, 0.370_1467],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
| 78 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case_ = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 | 1 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A_(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    BUGFIX: the mangled version duplicated `lowercase_` parameters (SyntaxError),
    never built `self.special_tokens_list` (the loop clobbered a local instead),
    flattened train/train_from_iterator/add_unk_id into one shadowed method name,
    and dropped the `unk_id` assignment in add_unk_id.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        # Flat list indexed by token id, consumed by the trainers below.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']

        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}'), ' '),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Always append EOS to a single sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"""$A {self.special_tokens['eos']['token']}""", special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])], )
        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 80_00, show_progress: bool = True):
        """Train the Unigram model from one or more text files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 80_00, show_progress: bool = True):
        """Train the Unigram model from an iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        """Patch the trained model's JSON so the unk token id is registered."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 78 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """Derive the class label from a pet image path, e.g. `.../beagle_32.jpg` -> `beagle`.

    BUGFIX: defined under a mangled name although callers below invoke `extract_label`.
    Raises AttributeError if the basename does not match `<label>_<n>.jpg`.
    """
    stem = fname.split(os.path.sep)[-1]
    return re.search(R'^(.*)_\d+\.jpg$', stem).groups()[0]
class PetsDataset(Dataset):
    """torch Dataset over pet image files yielding ``{"image", "label"}`` dicts.

    BUGFIX: `__init__` duplicated `lowercase_` parameters (SyntaxError), the class
    was named `A_` although the training function instantiates `PetsDataset`,
    and the base class was undefined although `Dataset` is imported above.
    """

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert('RGB')
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    """Fine-tune a frozen resnet50d classifier on the pets dataset with accelerate.

    `config` carries hyper-parameters (lr, num_epochs, seed, batch_size,
    image_size); `args` carries CLI options. Restored name: `main` calls
    `training_function(config, args)`. BUGFIX: the mangled signature duplicated
    `lowercase_` (SyntaxError) and many call sites passed that undefined name.
    """
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='all', project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    image_size = config['image_size']
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, 'isdigit'):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""")
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split('.')[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith('.jpg')]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model('resnet50d', pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg['mean'])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg['std'])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace('epoch_', '')) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_', ''))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch['label'])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"""step_{overall_step}"""
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['label']))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""")
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': 100 * eval_metric,
                    'train_loss': total_loss.item() / len(train_dataloader),
                    'epoch': epoch,
                }, step=overall_step, )
        if checkpointing_steps == "epoch":
            output_dir = f"""epoch_{epoch}"""
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def _lowerCAmelCase( ):
    """Entry point: parse command-line options and launch training.

    Fixes the obfuscated original, which referenced undefined names
    (``lowercase_`` in place of real defaults, ``parser``/``args``/``config``
    never bound, and a ``main()`` call with no ``main`` defined).
    """
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument('--data_dir' , required=True , help='The data folder on disk.' )
    parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    parser.add_argument(
        '--checkpointing_steps' , type=str , default=None , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--resume_from_checkpoint' , type=str , default=None , help='If the training should continue from a checkpoint folder.' , )
    parser.add_argument(
        '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
    parser.add_argument(
        '--project_dir' , type=str , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
    args = parser.parse_args()
    # Default hyper-parameters for this CV example.
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config , args )


if __name__ == "__main__":
    _lowerCAmelCase()
| 78 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ):
UpperCAmelCase = []
UpperCAmelCase = 0
for index, char in enumerate(lowercase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase = index + 1
elif index + 1 == len(lowercase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 78 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = list(range(len(lowercase_ ) ) )
UpperCAmelCase = [v / w for v, w in zip(lowercase_ , lowercase_ )]
index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ )
UpperCAmelCase = 0
UpperCAmelCase = [0] * len(lowercase_ )
for i in index:
if weight[i] <= capacity:
UpperCAmelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    """Pipeline-test mixin for DeepFloyd-IF pipelines: builds tiny dummy
    components and checks that a pipeline survives a
    ``save_pretrained``/``from_pretrained`` round-trip.

    NOTE(review): throughout this class the obfuscated assignments bind the
    name ``UpperCAmelCase`` while later statements read the original variable
    names (``text_encoder``, ``unet``, ``pipe``, ``inputs`` ...), and several
    call arguments were replaced by ``lowercase_``.  The original bindings and
    argument values need restoring against the upstream diffusers test suite
    before this code can run — kept byte-identical here, comments only.
    """

    def UpperCAmelCase__ ( self :Any ) -> List[str]:
        # Build tiny randomly-initialised components for a base IF pipeline
        # (text->image stage): T5 text encoder/tokenizer, small UNet, DDPM
        # scheduler and watermarker.  Seeds are reset before each component
        # so the dummies are reproducible.
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        # Keys follow the pipeline constructor's component names; the safety
        # checker and feature extractor are intentionally disabled for tests.
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase__ ( self :List[Any] ) -> Any:
        # Same as above but for the super-resolution stage: 6 input channels
        # (image + noise level), an extra image-noising scheduler, and a
        # timestep class embedding.
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase__ ( self :List[str] ) -> str:
        # Round-trip test: run the pipeline with prompts pre-encoded to
        # embeddings and every optional component set to None, save/reload,
        # and assert that (a) the optional components stay None and (b) the
        # reloaded pipeline's output matches the original within 1e-4.
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['prompt']
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        # img2img / inpainting / superres variants carry extra image inputs;
        # missing keys become None so the code below can treat them uniformly.
        if "image" in inputs:
            UpperCAmelCase = inputs['image']
        else:
            UpperCAmelCase = None
        if "mask_image" in inputs:
            UpperCAmelCase = inputs['mask_image']
        else:
            UpperCAmelCase = None
        if "original_image" in inputs:
            UpperCAmelCase = inputs['original_image']
        else:
            UpperCAmelCase = None
        UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(lowercase_ )
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )

    def UpperCAmelCase__ ( self :List[Any] ) -> str:
        # Simpler round-trip test: save/reload with default inputs and assert
        # the outputs agree within 1e-4.
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )
| 78 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class A_ ( Pipeline ):
    """Visual-question-answering pipeline: answers a free-text question about an image.

    The obfuscated original bound every local to ``UpperCAmelCase``, reused
    ``lowercase_`` for several parameters (a SyntaxError), and collapsed all
    ``Pipeline`` hook names into one.  Consistent names are restored here:
    the hooks follow the base ``Pipeline`` contract
    (``_sanitize_parameters`` / ``preprocess`` / ``_forward`` / ``postprocess``),
    the base class and decorator argument come from the visible imports
    (``Pipeline``, ``PIPELINE_INIT_ARGS``), and ``idalabel`` is the mangled
    spelling of ``id2label``.
    """

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        # Restrict to model classes that support visual question answering.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )

    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        # Route user kwargs to the preprocess / postprocess stages.
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , image: Union["Image.Image", str] , question: str = None , **kwargs ):
        """Answer ``question`` about ``image``.

        Accepts either a single (image, question) pair or any input the base
        pipeline understands (e.g. a list of ``{"image": ..., "question": ...}``
        dicts passed as the first argument).
        """
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'image': image, 'question': question}
        else:
            # Caller supplied pre-built dict(s)/generator of inputs.
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results

    def preprocess( self , inputs , padding=False , truncation=False ):
        # Tokenize the question and extract image features, then merge both
        # into a single model-input dict.
        image = load_image(inputs['image'] )
        model_inputs = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs

    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs , top_k=5 ):
        # Return the top-k answers as a list of {"score", "answer"} dicts.
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            # Multi-label head: independent sigmoid per answer candidate.
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 78 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data( olid = "isbn/0140328726" ):
    """Return the JSON record for an Open Library olid (e.g. ``isbn/0140328726``).

    Renamed from the obfuscated ``_lowerCAmelCase`` so the call sites in this
    module (``get_openlibrary_data(...)``) resolve; the parameter binding
    (``olid``) is restored from the body's references.

    :raises ValueError: if ``olid`` does not look like ``<kind>/<identifier>``.
    """
    new_olid = olid.strip().strip('/' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/' ) != 1:
        msg = f"""{olid} is not a valid Open Library olid"""
        raise ValueError(msg )
    return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book( ol_book_data ):
    """Flatten an Open Library *books* record into a human-readable summary dict.

    Renamed from the obfuscated ``_lowerCAmelCase`` so the ``__main__`` call
    site (``summarize_book(...)``) resolves; local bindings (``desired_keys``,
    ``data``) are restored from the body's references.

    :param ol_book_data: raw JSON record as returned by ``get_openlibrary_data``.
    :return: dict keyed by display labels; list values are joined with ", ".
    """
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Author entries are references; resolve each to its name via the API.
    data['Authors'] = [
        get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ', '.join(value )
    return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive loop: look up ISBNs until the user quits.
    # NOTE(review): the assignments bind ``snake_case_`` but the loop reads
    # ``isbn`` / ``book_summary`` — the original variable names need restoring
    # before this script can run.
    while True:
        snake_case_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        # A valid ISBN is 10 or 13 decimal digits.
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            snake_case_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
            print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 78 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and archive map (restored names: both obfuscated bindings
# collided on ``snake_case_``, leaving ``logger`` — which the config class
# below reads — undefined).
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration for Transformer-XL (``transfo-xl``) models.

    Restores the obfuscated original: all three class attributes collided on
    ``__UpperCamelCase`` (they are ``model_type`` / ``keys_to_ignore_at_inference``
    / ``attribute_map`` in the ``PretrainedConfig`` contract), every ctor
    parameter was named ``lowercase_`` (a SyntaxError), the ``self.*``
    bindings were lost, and the property/setter pair collided with an
    undefined ``max_position_embeddings`` reference.  Parameter names and
    order are reconstructed from the surviving default values and the
    attribute reads in the body.
    """

    model_type = """transfo-xl"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=26_77_35,
        cutoffs=[2_00_00, 4_00_00, 20_00_00],
        d_model=10_24,
        d_embed=10_24,
        n_head=16,
        d_head=64,
        d_inner=40_96,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=16_00,
        clamp_len=10_00,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1E-5,
        eos_token_id=0,
        **kwargs,
    ):
        # NOTE: the mutable default for ``cutoffs`` mirrors the upstream
        # signature; it is copied into a fresh list before use.
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            # Share all adaptive-softmax projections except the first cluster.
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )

    @property
    def max_position_embeddings( self ):
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 78 | 1 |
"""simple docstring"""
snake_case_ = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def _lowerCAmelCase ( lowercase_ ):
assert type(lowercase_ ) in (int, float) and decimal == int(lowercase_ )
UpperCAmelCase = int(lowercase_ )
UpperCAmelCase = ''
UpperCAmelCase = False
if decimal < 0:
UpperCAmelCase = True
decimal *= -1
while decimal > 0:
UpperCAmelCase , UpperCAmelCase = divmod(lowercase_ , 16 )
UpperCAmelCase = values[remainder] + hexadecimal
UpperCAmelCase = '0x' + hexadecimal
if negative:
UpperCAmelCase = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data( olid = "isbn/0140328726" ):
    """Return the JSON record for an Open Library olid (e.g. ``isbn/0140328726``).

    Renamed from the obfuscated ``_lowerCAmelCase`` so the call sites in this
    module (``get_openlibrary_data(...)``) resolve; the parameter binding
    (``olid``) is restored from the body's references.

    :raises ValueError: if ``olid`` does not look like ``<kind>/<identifier>``.
    """
    new_olid = olid.strip().strip('/' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/' ) != 1:
        msg = f"""{olid} is not a valid Open Library olid"""
        raise ValueError(msg )
    return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book( ol_book_data ):
    """Flatten an Open Library *books* record into a human-readable summary dict.

    Renamed from the obfuscated ``_lowerCAmelCase`` so the ``__main__`` call
    site (``summarize_book(...)``) resolves; local bindings (``desired_keys``,
    ``data``) are restored from the body's references.

    :param ol_book_data: raw JSON record as returned by ``get_openlibrary_data``.
    :return: dict keyed by display labels; list values are joined with ", ".
    """
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Author entries are references; resolve each to its name via the API.
    data['Authors'] = [
        get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ', '.join(value )
    return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive loop: look up ISBNs until the user quits.
    # NOTE(review): the assignments bind ``snake_case_`` but the loop reads
    # ``isbn`` / ``book_summary`` — the original variable names need restoring
    # before this script can run.
    while True:
        snake_case_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        # A valid ISBN is 10 or 13 decimal digits.
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            snake_case_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
            print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 78 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Configuration for ImageGPT models.

    Restores the obfuscated original: the class attributes collided on
    ``__UpperCamelCase`` (they are ``model_type`` / ``keys_to_ignore_at_inference``
    / ``attribute_map`` in the ``PretrainedConfig`` contract), every ctor
    parameter was named ``lowercase_`` (a SyntaxError), and the ``self.*``
    bindings were lost.  Names and order are reconstructed from the surviving
    default values and the attribute reads in the body.
    """

    model_type = """imagegpt"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=5_12 + 1,
        n_positions=32 * 32,
        n_embd=5_12,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # vocab_size is 512 colour clusters + 1 start-of-sequence token;
        # n_positions covers a 32x32 pixel grid.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for ImageGPT.

    Restores the obfuscated original: the ``inputs`` property and
    ``generate_dummy_inputs`` method had collided on one name
    (``UpperCAmelCase__``), making the property unreachable, and the method's
    parameters all collided on ``lowercase_`` (a SyntaxError).  Names follow
    the ``OnnxConfig`` contract.
    """

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        # Single pixel-sequence input with dynamic batch and sequence axes.
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
            ] )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate random dummy images and run them through ``preprocessor``
        to produce model inputs for ONNX export tracing."""
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
| 78 |
"""simple docstring"""
class Node:
    """A node of a doubly linked list.

    Renamed from the obfuscated ``A_`` (the linked-list class below constructs
    ``Node(...)``); attribute bindings and accessor names are restored from the
    references in the rest of the module.
    """

    def __init__( self , data , previous=None , next_node=None ):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__( self ) -> str:
        return f"""{self.data}"""

    def get_data( self ):
        return self.data

    def get_next( self ):
        return self.next

    def get_previous( self ):
        return self.previous
class LinkedListIterator:
    """Forward iterator over a chain of ``Node``-like objects, starting at ``head``.

    Renamed from the obfuscated ``A_`` (the linked-list class constructs
    ``LinkedListIterator(self.head)``); the advance method is restored as
    ``__next__`` — required by the iterator protocol since ``__iter__``
    returns ``self``.
    """

    def __init__( self , head ):
        self.current = head

    def __iter__( self ):
        return self

    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            # Yield the current value, then advance to the next node.
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class A_ :
"""simple docstring"""
def __init__( self :Union[str, Any] ) -> List[Any]:
UpperCAmelCase = None # First node in list
UpperCAmelCase = None # Last node in list
def __str__( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase = self.head
UpperCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
UpperCAmelCase = current.get_next()
return " ".join(str(lowercase_ ) for node in nodes )
def __contains__( self :str , lowercase_ :int ) -> str:
UpperCAmelCase = self.head
while current:
if current.get_data() == value:
return True
UpperCAmelCase = current.get_next()
return False
def __iter__( self :Tuple ) -> Dict:
return LinkedListIterator(self.head )
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
if self.head:
return self.head.get_data()
return None
def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]:
if self.tail:
return self.tail.get_data()
return None
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node ) -> None:
if self.head is None:
UpperCAmelCase = node
UpperCAmelCase = node
else:
self.insert_before_node(self.head , lowercase_ )
def UpperCAmelCase__ ( self :Any , lowercase_ :Node ) -> None:
if self.head is None:
self.set_head(lowercase_ )
else:
self.insert_after_node(self.tail , lowercase_ )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :int ) -> None:
UpperCAmelCase = Node(lowercase_ )
if self.head is None:
self.set_head(lowercase_ )
else:
self.set_tail(lowercase_ )
def UpperCAmelCase__ ( self :int , lowercase_ :Node , lowercase_ :Node ) -> None:
UpperCAmelCase = node
UpperCAmelCase = node.previous
if node.get_previous() is None:
UpperCAmelCase = node_to_insert
else:
UpperCAmelCase = node_to_insert
UpperCAmelCase = node_to_insert
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Node , lowercase_ :Node ) -> None:
UpperCAmelCase = node
UpperCAmelCase = node.next
if node.get_next() is None:
UpperCAmelCase = node_to_insert
else:
UpperCAmelCase = node_to_insert
UpperCAmelCase = node_to_insert
def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None:
UpperCAmelCase = 1
UpperCAmelCase = Node(lowercase_ )
UpperCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(lowercase_ , lowercase_ )
return
current_position += 1
UpperCAmelCase = node.next
self.insert_after_node(self.tail , lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int ) -> Node:
UpperCAmelCase = self.head
while node:
if node.get_data() == item:
return node
UpperCAmelCase = node.get_next()
raise Exception('Node not found' )
def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[Any] ) -> Dict:
if (node := self.get_node(lowercase_ )) is not None:
if node == self.head:
UpperCAmelCase = self.head.get_next()
if node == self.tail:
UpperCAmelCase = self.tail.get_previous()
self.remove_node_pointers(lowercase_ )
@staticmethod
def UpperCAmelCase__ ( lowercase_ :Node ) -> None:
if node.get_next():
UpperCAmelCase = node.previous
if node.get_previous():
UpperCAmelCase = node.next
UpperCAmelCase = None
UpperCAmelCase = None
def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]:
return self.head is None
def _lowerCAmelCase( ):
    """Placeholder kept for the module's doctest runner (intentionally empty)."""
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A_ ( unittest.TestCase ):
    """Robustness tests for tokenizer loading: cached loads must survive
    server errors (HTTP 500 head requests), and legacy single-file loads
    must keep working.

    NOTE(review): assignments bind the obfuscated name ``UpperCAmelCase``
    while later lines read the original variable names (the mock response,
    the temp file path, ``tokenizer`` ...), and some call arguments were
    replaced by ``lowercase_``.  Bindings need restoring against the
    upstream transformers test suite — kept byte-identical here, comments
    only.
    """

    def UpperCAmelCase__ ( self :List[str] ) -> Dict:
        # Cached-offline behaviour for a slow tokenizer: a 500 on the head
        # request must not prevent loading from cache.
        # A mock response for an HTTP head request to emulate server down
        UpperCAmelCase = mock.Mock()
        UpperCAmelCase = 5_00
        UpperCAmelCase = {}
        UpperCAmelCase = HTTPError
        UpperCAmelCase = {}
        # Download this model to make sure it's in the cache.
        UpperCAmelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=lowercase_ ) as mock_head:
            UpperCAmelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
        # Same cached-offline behaviour, but for a fast (Rust) tokenizer.
        # A mock response for an HTTP head request to emulate server down
        UpperCAmelCase = mock.Mock()
        UpperCAmelCase = 5_00
        UpperCAmelCase = {}
        UpperCAmelCase = HTTPError
        UpperCAmelCase = {}
        # Download this model to make sure it's in the cache.
        UpperCAmelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=lowercase_ ) as mock_head:
            UpperCAmelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCAmelCase__ ( self :Any ) -> Union[str, Any]:
        # Legacy path: loading a tokenizer from a single downloaded vocab file.
        # This test is for deprecated behavior and can be removed in v5
        try:
            UpperCAmelCase = tempfile.mktemp()
            with open(lowercase_ , 'wb' ) as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , lowercase_ )
            UpperCAmelCase = AlbertTokenizer.from_pretrained(lowercase_ )
        finally:
            os.remove(lowercase_ )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json' ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json' , 'wb' ) as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , lowercase_ )
            UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size , 10_00 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json' )

    def UpperCAmelCase__ ( self :Tuple ) -> Optional[int]:
        # Legacy path: loading directly from a remote single-file URL.
        # This test is for deprecated behavior and can be removed in v5
        UpperCAmelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class A_ ( unittest.TestCase ):
    """Round-trip tests: push tokenizers to the (staging) Hub and reload them.

    NOTE(review): identifier mangling collapsed ``setUpClass``/``tearDownClass``
    and the test methods into colliding ``UpperCAmelCase__`` names and most
    locals into ``UpperCAmelCase``; original intent is reconstructed in
    comments only — code left byte-identical.
    """

    # Minimal WordPiece vocabulary written to a temporary `vocab.txt` per test
    # (read back through `self.vocab_tokens`).
    __UpperCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]

    @classmethod
    def UpperCAmelCase__ ( cls :Tuple ) -> List[Any]:
        # setUpClass: authenticate once against the Hub for the whole class.
        # NOTE(review): the token is presumably stored as `cls._token` in the
        # original (the teardown below reads it) — name lost in mangling.
        UpperCAmelCase = TOKEN
        HfFolder.save_token(lowercase_ )

    @classmethod
    def UpperCAmelCase__ ( cls :List[str] ) -> Optional[int]:
        # tearDownClass: best-effort deletion of repos the tests may have
        # created; a repo that was never pushed raises HTTPError, which we ignore.
        try:
            delete_repo(token=cls._token , repo_id='test-tokenizer' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
        except HTTPError:
            pass

    def UpperCAmelCase__ ( self :Any ) -> str:
        """Push a slow BertTokenizer to a user repo (both via push_to_hub and
        save_pretrained), reload it, and compare vocabularies."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Write the toy vocab so a tokenizer can be instantiated from disk.
            UpperCAmelCase = os.path.join(lowercase_ , 'vocab.txt' )
            with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            UpperCAmelCase = BertTokenizer(lowercase_ )
        tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
        UpperCAmelCase = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-tokenizer' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowercase_ , repo_id='test-tokenizer' , push_to_hub=lowercase_ , use_auth_token=self._token )
        UpperCAmelCase = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

    def UpperCAmelCase__ ( self :int ) -> Union[str, Any]:
        """Same round-trip as above, but into an organization namespace."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase = os.path.join(lowercase_ , 'vocab.txt' )
            with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            UpperCAmelCase = BertTokenizer(lowercase_ )
        tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
        UpperCAmelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                lowercase_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=lowercase_ , use_auth_token=self._token )
        UpperCAmelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )

    @require_tokenizers
    def UpperCAmelCase__ ( self :int ) -> Tuple:
        """Push custom (dynamic-module) tokenizer classes and reload them with
        trust_remote_code, checking the resolved class names."""
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase = os.path.join(lowercase_ , 'vocab.txt' )
            with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            UpperCAmelCase = CustomTokenizer(lowercase_ )
        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
        UpperCAmelCase = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowercase_ )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase = os.path.join(lowercase_ , 'vocab.txt' )
            with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            UpperCAmelCase = BertTokenizerFast.from_pretrained(lowercase_ )
            bert_tokenizer.save_pretrained(lowercase_ )
            UpperCAmelCase = CustomTokenizerFast.from_pretrained(lowercase_ )
        tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
        UpperCAmelCase = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowercase_ )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
        UpperCAmelCase = AutoTokenizer.from_pretrained(
            f"""{USER}/test-dynamic-tokenizer""" , use_fast=lowercase_ , trust_remote_code=lowercase_ )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class A_ ( unittest.TestCase ):
    """Unit tests for the tokenizer ``Trie`` used for special-token splitting."""

    def UpperCAmelCase__ ( self :List[str] ) -> Tuple:
        # Adding words builds nested character dicts; '': 1 marks a terminator.
        UpperCAmelCase = Trie()
        trie.add('Hello 友達' )
        self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
        trie.add('Hello' )
        trie.data
        self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )

    def UpperCAmelCase__ ( self :Tuple ) -> str:
        # split() is a no-op with no entries, then cuts around known tokens
        # (longest match: extra_id_100 beats extra_id_1).
        UpperCAmelCase = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
        trie.add('[CLS]' )
        trie.add('extra_id_1' )
        trie.add('extra_id_100' )
        self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )

    def UpperCAmelCase__ ( self :Union[str, Any] ) -> Union[str, Any]:
        # A single-character token splits at either end of the text.
        UpperCAmelCase = Trie()
        trie.add('A' )
        self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
        self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )

    def UpperCAmelCase__ ( self :Any ) -> int:
        # A partially-overlapping entry ('TOKEN]') must not break the full match.
        UpperCAmelCase = Trie()
        trie.add('TOKEN]' )
        trie.add('[SPECIAL_TOKEN]' )
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )

    def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
        # Unrelated single-char entries must not disturb the full match.
        UpperCAmelCase = Trie()
        trie.add('A' )
        trie.add('P' )
        trie.add('[SPECIAL_TOKEN]' )
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )

    def UpperCAmelCase__ ( self :int ) -> Union[str, Any]:
        # Longest match wins: 'AB' beats 'B'.
        UpperCAmelCase = Trie()
        trie.add('AB' )
        trie.add('B' )
        trie.add('C' )
        self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )

    def UpperCAmelCase__ ( self :Optional[int] ) -> List[Any]:
        # Overlapping candidates: the earlier, longer match ('ABC') wins.
        UpperCAmelCase = Trie()
        trie.add('ABC' )
        trie.add('B' )
        trie.add('CD' )
        self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )

    def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        UpperCAmelCase = Trie()
        UpperCAmelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(lowercase_ , ['AB', 'C'] )
| 78 |
"""simple docstring"""
class A_ :
    """Fenwick-style tree supporting point updates and range-maximum queries.

    Both operations run in O(log^2 n). ``query(left, right)`` treats ``right``
    as exclusive, and an empty range yields 0.

    NOTE(review): the mangled original defined every method under one colliding
    name while its body still called ``self.get_prev`` / ``self.get_next`` and
    needed a range-query to rebuild block maxima — names that no longer
    existed. The canonical method names are restored here so the internal
    calls resolve, and the block-max recomputation goes through ``query`` so
    decreases are handled correctly as well as increases.

    >>> ft = A_(5)
    >>> ft.update(0, 4)
    >>> ft.query(0, 5)
    4
    """

    def __init__(self, size: int) -> None:
        self.size = size          # number of slots
        self.arr = [0] * size     # raw values
        self.tree = [0] * size    # tree[i] = max of the block ending at i

    @staticmethod
    def get_next(index: int) -> int:
        # Next index whose tree node covers `index` (Fenwick successor).
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        # One before the left border of the block that tree[index] covers.
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set ``arr[index] = value`` and refresh every tree node covering it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # This node covers only `index` itself.
                self.tree[index] = value
            else:
                # Recompute the block max over [current_left_border, index);
                # this stays correct when the value decreased.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return the maximum of ``arr[left:right]`` (``right`` exclusive)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # tree[right]'s whole block lies inside the query range.
                result = max(result, self.tree[right])
                right = current_left
            else:
                # Partial overlap: fall back to the raw value at `right`.
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n, in order.

    >>> generate_all_combinations(n=4, k=2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Backtracking helper: grow ``current_list`` until ``level`` picks remain."""
    if level == 0:
        # A full combination has been built; store a copy, not the shared list.
        total_list.append(current_list[:])
        return
    # Upper bound leaves enough room for the remaining `level - 1` picks.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print one combination per line."""
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 78 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """SentencePiece-style Unigram tokenizer assembled from `tokenizers` parts.

    Pipeline: NMT + NFKC normalization, whitespace squeezing and lowercasing;
    Metaspace/digit/punctuation pre-tokenization; a Unigram model; and a
    post-processor that appends EOS to every encoded sequence.

    NOTE(review): mangling renamed the train / train-from-iterator / add-unk-id
    methods to one colliding ``UpperCAmelCase__`` and collapsed parameter and
    local names; code left byte-identical, intent noted in comments.
    """

    def __init__( self :Dict , lowercase_ :str = "▁" , lowercase_ :bool = True , lowercase_ :Union[str, AddedToken] = "<unk>" , lowercase_ :Union[str, AddedToken] = "</s>" , lowercase_ :Union[str, AddedToken] = "<pad>" , ) -> str:
        # NOTE(review): duplicate `lowercase_` parameters are a mangling
        # artifact — originally: replacement, add_prefix_space, unk/eos/pad tokens.
        # Special tokens with fixed ids: pad=0, eos=1, unk=2.
        UpperCAmelCase = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        # Flat, id-ordered list of the special-token strings.
        UpperCAmelCase = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            UpperCAmelCase = token_dict['token']
        UpperCAmelCase = Tokenizer(Unigram() )
        # Normalize: NMT + NFKC, squeeze runs of spaces, lowercase everything.
        UpperCAmelCase = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}' ) , ' ' ),
                normalizers.Lowercase(),
            ] )
        # Pre-tokenize on metaspace; split out individual digits and punctuation.
        UpperCAmelCase = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ),
                pre_tokenizers.Digits(individual_digits=lowercase_ ),
                pre_tokenizers.Punctuation(),
            ] )
        UpperCAmelCase = decoders.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ )
        # Post-process: append EOS to every single sequence.
        UpperCAmelCase = TemplateProcessing(
            single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
        UpperCAmelCase = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(lowercase_ , lowercase_ )

    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Union[str, List[str]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Union[str, Any]:
        """Train the Unigram model from one or more text files, then re-wire unk_id."""
        UpperCAmelCase = trainers.UnigramTrainer(
            vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , )
        if isinstance(lowercase_ , lowercase_ ):
            # A single path was given; the backend expects a list of files.
            UpperCAmelCase = [files]
        self._tokenizer.train(lowercase_ , trainer=lowercase_ )
        self.add_unk_id()

    def UpperCAmelCase__ ( self :str , lowercase_ :Union[Iterator[str], Iterator[Iterator[str]]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Tuple:
        """Train the Unigram model from an in-memory iterator, then re-wire unk_id."""
        UpperCAmelCase = trainers.UnigramTrainer(
            vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , )
        self._tokenizer.train_from_iterator(lowercase_ , trainer=lowercase_ )
        self.add_unk_id()

    def UpperCAmelCase__ ( self :Union[str, Any] ) -> int:
        """Patch the serialized model so the Unigram unk_id points at `<unk>`."""
        UpperCAmelCase = json.loads(self._tokenizer.to_str() )
        UpperCAmelCase = self.special_tokens['unk']['id']
        UpperCAmelCase = Tokenizer.from_str(json.dumps(lowercase_ ) )
| 78 | 1 |
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    """One Gaussian-elimination pass, applied recursively.

    Normalizes each row by its leading coefficient, cancels the leading term
    of every row below the first, then recurses on the trailing sub-matrix
    until rows are 3 wide. Rows are mutated in place (shallow copy).

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> 'unit' leading 1s.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                # Leading zero: leave the row untouched.
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row from each later row to cancel its leading term.
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create the next recursion iteration set from the trailing columns.
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        # Re-attach the leading column and the saved first row.
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n rows of n+1 numbers
    (coefficients followed by the constant term).

    Returns the variable values rounded to 5 decimal places, in variable order.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            # NOTE: floats are accepted too despite the message's wording.
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Pivot: if any row contains a zero, move one zero-free row to the front.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    # Back-substitute from the last (most reduced) row upwards.
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 78 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Return a cryptographically random password of *length* characters.

    >>> len(password_generator())
    8
    >>> len(password_generator(length=16))
    16
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Build an *i*-character password guaranteed to contain ``chars_incl``.

    The remaining length is split roughly evenly between letters, digits and
    punctuation, then the whole string is shuffled.
    """
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return *i* characters drawn (CSPRNG-backed) from ``chars_incl``."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check the password mixes upper/lower case, digits and special chars."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowerase
    # numbers, and special characters


def main() -> None:
    """Interactive driver: prompt for a length and required chars, print passwords."""
    length = int(input('Please indicate the max length of your password: ' ).strip())
    chars_incl = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' , password_generator(length))
    print(
        'Alternative Password generated:' , alternative_password_generator(chars_incl , length) , )
    print('[If you are thinking of using this passsword, You better save it.]' )


if __name__ == "__main__":
    main()
| 78 | 1 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Pure-Python SHA-256 (FIPS 180-4) over a ``bytes`` payload.

    The digest is computed eagerly in ``__init__`` and exposed as the 64-char
    hex string ``self.hash``.

    NOTE(review): the mangled original renamed ``preprocessing`` /
    ``final_hash`` / ``ror`` to one colliding identifier while still calling
    them by their real names, and collapsed the two distinct compression
    temporaries into a single ``tempa`` (corrupting the digest); both are
    restored here, along with the canonical class name used by the test below.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes.
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a multiple of 64 bytes: 0x80, zeros, then the 64-bit bit-length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the message schedule and compression over each 64-byte block."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4-byte big-endian integers
            words = list(struct.unpack(">16L", block))
            # Add 48 zeroed words, filled in by the message schedule below.
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # Message schedule: extend the first 16 words to 64.
                    sa = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x100000000
                # Compression
                sa = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp_a = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                sb = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp_b = (sb + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp_a) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp_a + temp_b) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit *value* by *rotations* bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Cross-check the pure-Python digest against hashlib's C implementation."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash a CLI-supplied string or file and print the hex digest."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
| 78 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    """Shared test mixin for DeepFloyd IF pipelines.

    Provides tiny dummy components (T5 encoder/tokenizer, UNet2DCondition,
    DDPM schedulers, watermarker) and two save/load round-trip checks.
    Concrete test classes are expected to supply ``pipeline_class`` and
    ``get_dummy_inputs``.

    NOTE(review): identifier mangling collapsed every method name into
    ``UpperCAmelCase__`` and locals into ``UpperCAmelCase``; original intent
    is reconstructed in comments only — code left byte-identical.
    """

    def UpperCAmelCase__ ( self :Any ) -> List[str]:
        # Dummy components for the base (stage-1) IF pipeline.
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase__ ( self :List[Any] ) -> Any:
        # Dummy components for the super-resolution IF pipelines, which take a
        # 6-channel input and additionally need an image-noising scheduler.
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase__ ( self :List[str] ) -> str:
        # Round-trip: run the pipeline with pre-computed prompt embeddings and
        # all optional components set to None, save, reload, and require the
        # reloaded pipeline to reproduce the output.
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['prompt']
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        # image / mask_image / original_image are only present for the
        # img2img / inpaint / superresolution variants.
        if "image" in inputs:
            UpperCAmelCase = inputs['image']
        else:
            UpperCAmelCase = None
        if "mask_image" in inputs:
            UpperCAmelCase = inputs['mask_image']
        else:
            UpperCAmelCase = None
        if "original_image" in inputs:
            UpperCAmelCase = inputs['original_image']
        else:
            UpperCAmelCase = None
        UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(lowercase_ )
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        # Optional components must survive save/load as None.
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        # Outputs before/after save+load must agree to within 1e-4.
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )

    def UpperCAmelCase__ ( self :List[Any] ) -> str:
        # Simpler round-trip: save locally, reload, and compare outputs.
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )
| 78 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
snake_case_ = logging.get_logger(__name__)

# NOTE(review): mangling collapsed the distinct module constants into repeated
# `snake_case_` rebindings — each assignment below shadows the previous one.
# Originally: VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_INIT_CONFIGURATION
# (the names the class below still reads).

# Resource file names the tokenizer loader resolves per checkpoint.
snake_case_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

# Hub URLs for each pretrained SqueezeBERT checkpoint's vocab/tokenizer files.
snake_case_ = {
    """vocab_file""": {
        """squeezebert/squeezebert-uncased""": (
            """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
        ),
        """squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
        """squeezebert/squeezebert-mnli-headless""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """squeezebert/squeezebert-uncased""": (
            """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
        ),
        """squeezebert/squeezebert-mnli""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
        ),
        """squeezebert/squeezebert-mnli-headless""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
        ),
    },
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
snake_case_ = {
    """squeezebert/squeezebert-uncased""": 512,
    """squeezebert/squeezebert-mnli""": 512,
    """squeezebert/squeezebert-mnli-headless""": 512,
}

# Default init kwargs per checkpoint (all models are lowercasing).
snake_case_ = {
    """squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
    """squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
    """squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Fast (Rust-backed) tokenizer for SqueezeBERT — a BertTokenizerFast clone.

    NOTE(review): mangling collapsed the method names into colliding
    ``UpperCAmelCase__`` identifiers and most parameters into repeated
    ``lowercase_`` (which is not valid Python as-is). Code is left
    byte-identical; original intent is reconstructed in comments only.
    """

    # Loading tables; the right-hand names are the mangled module constants above.
    __UpperCamelCase = VOCAB_FILES_NAMES
    __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
    __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase = SqueezeBertTokenizer

    def __init__( self :List[Any] , lowercase_ :Any=None , lowercase_ :Optional[int]=None , lowercase_ :str=True , lowercase_ :Optional[Any]="[UNK]" , lowercase_ :Union[str, Any]="[SEP]" , lowercase_ :str="[PAD]" , lowercase_ :Union[str, Any]="[CLS]" , lowercase_ :Tuple="[MASK]" , lowercase_ :Optional[Any]=True , lowercase_ :Dict=None , **lowercase_ :Tuple , ) -> Optional[int]:
        # NOTE(review): duplicate `lowercase_` parameters are a mangling artifact
        # (originally: vocab_file, tokenizer_file, do_lower_case, special tokens,
        # tokenize_chinese_chars, strip_accents).
        super().__init__(
            lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , )
        # Rebuild the backend normalizer if its serialized state disagrees with
        # the requested lowercasing / accent-stripping / Chinese-char handling.
        UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , lowercase_ ) != do_lower_case
            or normalizer_state.get('strip_accents' , lowercase_ ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , lowercase_ ) != tokenize_chinese_chars
        ):
            UpperCAmelCase = getattr(lowercase_ , normalizer_state.pop('type' ) )
            UpperCAmelCase = do_lower_case
            UpperCAmelCase = strip_accents
            UpperCAmelCase = tokenize_chinese_chars
            UpperCAmelCase = normalizer_class(**lowercase_ )
        UpperCAmelCase = do_lower_case

    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :int , lowercase_ :Dict=None ) -> Tuple:
        # build_inputs_with_special_tokens: [CLS] A [SEP] (+ B [SEP] for a pair).
        # NOTE(review): the body reads `token_ids_a` for both sequences — the
        # second sequence's name (originally token_ids_1) was lost in mangling.
        UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def UpperCAmelCase__ ( self :Any , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
        # create_token_type_ids_from_sequences: 0s for sequence A (incl. cls/sep),
        # 1s for sequence B (incl. its trailing sep).
        UpperCAmelCase = [self.sep_token_id]
        UpperCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :str , lowercase_ :Optional[str] = None ) -> Tuple[str]:
        # save_vocabulary: delegate to the backend model's save(); returns the
        # tuple of file paths written.
        UpperCAmelCase = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
        return tuple(lowercase_ )
| 78 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=8 ):
UpperCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _lowerCAmelCase ( pil_image , w=512 , h=512 ):
    """Resize a PIL image to (w, h) and return a (1, 3, h, w) float tensor
    normalized to [-1, 1].

    NOTE(review): the original declared duplicate ``lowercase_`` parameters
    (a SyntaxError), referenced the undefined ``np.floataa`` (intended
    ``np.float32``), and never bound ``arr``; restored.
    """
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('RGB' ) )
    # Map uint8 [0, 255] to float [-1, 1].
    arr = arr.astype(np.float32 ) / 127.5 - 1
    # HWC -> CHW for torch.
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Kandinsky-2.2-style image-to-image latent-diffusion decoder pipeline.

    Wires together a conditional UNet, a DDPM scheduler and a MoVQ (VQ) image
    codec: the input image is encoded to latents, partially noised according
    to ``strength``, then denoised under image-embedding conditioning.

    NOTE(review): this block is machine-mangled — every local is named
    ``UpperCAmelCase`` (each assignment shadows the last, while later lines
    read the originally intended names such as ``timesteps`` or ``latents``),
    and several signatures repeat the parameter name ``lowercase_``, which is
    a SyntaxError. Distinct names must be restored before this can run.
    """
    def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]:
        super().__init__()
        # Register sub-models so save/load and device placement track them.
        self.register_modules(
            unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
        # Spatial downscale factor of the MoVQ encoder (factor 2 per block).
        UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]:
        # get the original timestep using init_timestep
        UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ )
        UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
        # img2img only denoises the tail of the schedule; `strength` controls
        # how many of the final timesteps are actually run.
        UpperCAmelCase = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any:
        # Build noised initial latents from an input image (or raw latents).
        if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
        UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ )
        UpperCAmelCase = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # Four channels: input is already MoVQ latents; use as-is.
            UpperCAmelCase = image
        else:
            if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(lowercase_ , lowercase_ ):
                # One generator per batch element: encode each slice separately.
                UpperCAmelCase = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
                ]
                UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
            else:
                UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
            # Scale encoder output into the scheduler's latent range.
            UpperCAmelCase = self.movq.config.scaling_factor * init_latents
        UpperCAmelCase = torch.cat([init_latents] , dim=0 )
        UpperCAmelCase = init_latents.shape
        UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
        # get latents
        UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = init_latents
        return latents
    def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]:
        # Sequential CPU offload of sub-models to reduce GPU memory use.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
        UpperCAmelCase = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(lowercase_ , lowercase_ )
    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict:
        # Model-level CPU offload with hooks (requires accelerate >= 0.17).
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=lowercase_ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        UpperCAmelCase = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
        # We'll offload the last model manually.
        UpperCAmelCase = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
        # Device where the UNet actually executes (accounts for offload hooks).
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowercase_ , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(lowercase_ )
    def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]:
        # Full img2img loop: encode image -> noise latents -> denoise -> decode.
        UpperCAmelCase = self._execution_device
        # Classifier-free guidance is active only for guidance_scale > 1.
        UpperCAmelCase = guidance_scale > 1.0
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
        UpperCAmelCase = image_embeds.shape[0]
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate per requested image, then stack [negative, positive].
            UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 )
            UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
            UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
        if not isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = [image]
        if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support  PIL image and pytorch tensor""" )
        UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
        UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ )
        # Encode the pixel image into MoVQ latents.
        UpperCAmelCase = self.movq.encode(lowercase_ )['latents']
        UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 )
        self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
        UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
        UpperCAmelCase = self.prepare_latents(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
        for i, t in enumerate(self.progress_bar(lowercase_ ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCAmelCase = {'image_embeds': image_embeds}
            UpperCAmelCase = self.unet(
                sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise from predicted variance, apply CFG to
                # the noise term only, then re-attach the (text) variance.
                UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
                UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
                UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
                UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase = self.scheduler.step(
                lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
        # post-processing
        UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map decoder output from [-1, 1] back to [0, 1], channels-last.
            UpperCAmelCase = image * 0.5 + 0.5
            UpperCAmelCase = image.clamp(0 , 1 )
            UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCAmelCase = self.numpy_to_pil(lowercase_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowercase_ )
| 78 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module logger.
snake_case_ = logging.get_logger(__name__)
# SentencePiece's word-boundary marker character.
# NOTE(review): every module constant below shares the mangled name
# ``snake_case_`` — each assignment shadows the previous one — while the class
# references names such as VOCAB_FILES_NAMES; confirm intended constant names.
snake_case_ = """▁"""
# Expected file name of the serialized SentencePiece model.
snake_case_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
# Remote URLs of the pretrained SentencePiece vocab files, per checkpoint.
snake_case_ = {
    """vocab_file""": {
        """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
        """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
        """xlm-roberta-large-finetuned-conll02-dutch""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
        ),
        """xlm-roberta-large-finetuned-conll02-spanish""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
        ),
        """xlm-roberta-large-finetuned-conll03-english""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
        ),
        """xlm-roberta-large-finetuned-conll03-german""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
snake_case_ = {
    """xlm-roberta-base""": 512,
    """xlm-roberta-large""": 512,
    """xlm-roberta-large-finetuned-conll02-dutch""": 512,
    """xlm-roberta-large-finetuned-conll02-spanish""": 512,
    """xlm-roberta-large-finetuned-conll03-english""": 512,
    """xlm-roberta-large-finetuned-conll03-german""": 512,
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """SentencePiece-BPE tokenizer in the XLM-RoBERTa style.

    Mimics fairseq's token<->id alignment: fairseq ids 0-3 are <s>, <pad>,
    </s>, <unk>, so every SentencePiece id is shifted by ``fairseq_offset``.

    NOTE(review): this block is machine-mangled — locals are all named
    ``UpperCAmelCase`` (later lines read the intended names such as
    ``state`` or ``vocab``) and ``__init__`` repeats the parameter name
    ``lowercase_``, which is a SyntaxError. Restore distinct names first.
    """
    __UpperCamelCase = VOCAB_FILES_NAMES
    __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase = ["""input_ids""", """attention_mask"""]
    def __init__( self :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[int]="<s>" , lowercase_ :Optional[Any]="</s>" , lowercase_ :Optional[int]="</s>" , lowercase_ :List[str]="<s>" , lowercase_ :Tuple="<unk>" , lowercase_ :Union[str, Any]="<pad>" , lowercase_ :int="<mask>" , lowercase_ :Optional[Dict[str, Any]] = None , **lowercase_ :Optional[int] , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
        UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
        # Load the SentencePiece model from the given vocab file path.
        UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(lowercase_ ) )
        UpperCAmelCase = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        UpperCAmelCase = 1
        UpperCAmelCase = len(self.sp_model ) + self.fairseq_offset
        UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self :Optional[Any] ) -> List[str]:
        # Drop the unpicklable SentencePiece object; keep the serialized proto.
        UpperCAmelCase = self.__dict__.copy()
        UpperCAmelCase = None
        UpperCAmelCase = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self :str , lowercase_ :Dict ) -> Dict:
        UpperCAmelCase = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            UpperCAmelCase = {}
        # Rebuild the SentencePiece processor from the serialized proto.
        UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def UpperCAmelCase__ ( self :Any , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
        # Build model inputs: <s> A </s> (or <s> A </s></s> B </s> for pairs).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase = [self.cls_token_id]
        UpperCAmelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def UpperCAmelCase__ ( self :int , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None , lowercase_ :bool = False ) -> List[int]:
        # Mask with 1 for special tokens, 0 for sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
        if token_ids_a is None:
            return [1] + ([0] * len(lowercase_ )) + [1]
        return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
    def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
        # XLM-R does not use token-type ids, so every position is segment 0.
        UpperCAmelCase = [self.sep_token_id]
        UpperCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[int]:
        return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
    def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[Any]:
        # Full token->id map, including tokens added after training.
        UpperCAmelCase = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def UpperCAmelCase__ ( self :int , lowercase_ :str ) -> List[str]:
        # Tokenize raw text into SentencePiece string pieces.
        return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
    def UpperCAmelCase__ ( self :Tuple , lowercase_ :Optional[Any] ) -> int:
        # Token -> id, honoring the fairseq special-token remapping.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        UpperCAmelCase = self.sp_model.PieceToId(lowercase_ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def UpperCAmelCase__ ( self :Dict , lowercase_ :Any ) -> Dict:
        # Id -> token, honoring the fairseq special-token remapping.
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def UpperCAmelCase__ ( self :List[str] , lowercase_ :Union[str, Any] ) -> Optional[Any]:
        # Join pieces and turn the SentencePiece underline back into spaces.
        UpperCAmelCase = ''.join(lowercase_ ).replace(lowercase_ , ' ' ).strip()
        return out_string
    def UpperCAmelCase__ ( self :Dict , lowercase_ :str , lowercase_ :Optional[str] = None ) -> Tuple[str]:
        # Copy (or re-serialize) the SentencePiece model into save_directory.
        if not os.path.isdir(lowercase_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase = os.path.join(
            lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowercase_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase_ , 'wb' ) as fi:
                UpperCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(lowercase_ )
        return (out_vocab_file,)
| 78 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = x
UpperCAmelCase = y
for step in range(lowercase_ ): # noqa: B007
UpperCAmelCase = a * a - b * b + x
UpperCAmelCase = 2 * a * b + y
UpperCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) )
def _lowerCAmelCase ( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    """Render the Mandelbrot set into a new RGB PIL image.

    Maps every pixel to figure coordinates centered on
    (figure_center_x, figure_center_y), computes the escape distance, and
    colors it either by hue (distance color coding) or black-and-white.

    NOTE(review): the original declared seven parameters all named
    ``lowercase_`` (a SyntaxError) and never bound ``img``/``pixels``;
    restored. ``get_distance``/``get_color_coded_rgb``/
    ``get_black_and_white_rgb`` must resolve to the helpers defined above.
    """
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()
    # Figure height follows from width and the image aspect ratio; it is
    # loop-invariant, so compute it once.
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # NOTE(review): `get_image` must resolve to the renderer defined above.
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    # Fix: the rendered image was bound to a throwaway name while `img.show()`
    # read an undefined `img` (NameError); bind and show the same name.
    img.show()
| 78 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = x
UpperCAmelCase = y
for step in range(lowercase_ ): # noqa: B007
UpperCAmelCase = a * a - b * b + x
UpperCAmelCase = 2 * a * b + y
UpperCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) )
def _lowerCAmelCase ( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    """Render the Mandelbrot set into a new RGB PIL image.

    Maps every pixel to figure coordinates centered on
    (figure_center_x, figure_center_y), computes the escape distance, and
    colors it either by hue (distance color coding) or black-and-white.

    NOTE(review): the original declared seven parameters all named
    ``lowercase_`` (a SyntaxError) and never bound ``img``/``pixels``;
    restored. ``get_distance``/``get_color_coded_rgb``/
    ``get_black_and_white_rgb`` must resolve to the helpers defined above.
    """
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()
    # Figure height follows from width and the image aspect ratio; it is
    # loop-invariant, so compute it once.
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # NOTE(review): `get_image` must resolve to the renderer defined above.
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    # Fix: the rendered image was bound to a throwaway name while `img.show()`
    # read an undefined `img` (NameError); bind and show the same name.
    img.show()
| 78 |
"""simple docstring"""
import requests
# Fix: both constants were bound to the same mangled name `snake_case_`
# while the request helpers below read APPID and URL_BASE (NameError);
# restored the names the rest of the module uses. Values are unchanged.
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"  # REST API root
def _lowerCAmelCase ( q = "Chicago" , appid = APPID ):
    """Return current weather for location *q* as parsed JSON from the
    OpenWeatherMap ``/weather`` endpoint.

    ``locals()`` deliberately captures ``q`` and ``appid`` as the query
    parameters, so the names must match the API's parameter names.

    NOTE(review): the original declared both parameters as ``lowercase_``
    (a SyntaxError); restored the API parameter names.
    """
    return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def _lowerCAmelCase ( q = "Kolkata, India" , appid = APPID ):
    """Return the weather forecast for location *q* as parsed JSON from the
    OpenWeatherMap ``/forecast`` endpoint.

    ``locals()`` deliberately captures ``q`` and ``appid`` as the query
    parameters, so the names must match the API's parameter names.

    NOTE(review): the original declared both parameters as ``lowercase_``
    (a SyntaxError); restored the API parameter names.
    """
    return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def _lowerCAmelCase ( lat = 55.68 , lon = 12.57 , appid = APPID ):
    """Return the "one call" weather data for coordinates (*lat*, *lon*) as
    parsed JSON from the OpenWeatherMap ``/onecall`` endpoint.

    ``locals()`` deliberately captures ``lat``, ``lon`` and ``appid`` as the
    query parameters, so the names must match the API's parameter names.

    NOTE(review): the original declared duplicate ``lowercase_`` parameters
    (a SyntaxError); restored the API parameter names. The defaults
    ``5_5.6_8``/``1_2.5_7`` equal 55.68/12.57 and are preserved.
    """
    return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
    from pprint import pprint

    # Interactive loop: print current weather until an empty line is entered.
    while True:
        # NOTE(review): the user's input is bound to the mangled name
        # ``snake_case_`` while the loop tests ``location`` and calls
        # ``current_weather`` — both undefined here; confirm intended names.
        snake_case_ = input("""Enter a location:""").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
| 78 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class A_ ( unittest.TestCase ):
    """Fixture/factory class for the Flax ViT tests: builds small random
    configs and inputs, and runs per-head model checks.

    NOTE(review): locals are machine-mangled to ``UpperCAmelCase`` — later
    lines read the intended attribute names (``parent``, ``batch_size``, ...)
    that those assignments were meant to bind; restore distinct names before
    running.
    """
    def __init__( self :Optional[Any] , lowercase_ :Optional[Any] , lowercase_ :List[str]=13 , lowercase_ :Tuple=30 , lowercase_ :str=2 , lowercase_ :Optional[int]=3 , lowercase_ :Dict=True , lowercase_ :List[str]=True , lowercase_ :str=32 , lowercase_ :Dict=5 , lowercase_ :Optional[int]=4 , lowercase_ :Optional[Any]=37 , lowercase_ :Dict="gelu" , lowercase_ :int=0.1 , lowercase_ :int=0.1 , lowercase_ :Union[str, Any]=10 , lowercase_ :str=0.02 , ) -> Optional[int]:
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = image_size
        UpperCAmelCase = patch_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_hidden_layers
        UpperCAmelCase = num_attention_heads
        UpperCAmelCase = intermediate_size
        UpperCAmelCase = hidden_act
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = type_sequence_label_size
        UpperCAmelCase = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase = (image_size // patch_size) ** 2
        UpperCAmelCase = num_patches + 1
    def UpperCAmelCase__ ( self :str ) -> List[str]:
        # Build a random pixel batch plus a matching small ViTConfig.
        UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
        return config, pixel_values
    def UpperCAmelCase__ ( self :str , lowercase_ :Union[str, Any] , lowercase_ :List[Any] ) -> List[Any]:
        # Check the base model's output shape.
        UpperCAmelCase = FlaxViTModel(config=lowercase_ )
        UpperCAmelCase = model(lowercase_ )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase = (self.image_size, self.image_size)
        UpperCAmelCase = (self.patch_size, self.patch_size)
        UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] , lowercase_ :Dict ) -> List[Any]:
        # Check the classification head's logits shape (RGB and greyscale).
        UpperCAmelCase = self.type_sequence_label_size
        UpperCAmelCase = FlaxViTForImageClassification(config=lowercase_ )
        UpperCAmelCase = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCAmelCase = 1
        UpperCAmelCase = FlaxViTForImageClassification(lowercase_ )
        UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase = model(lowercase_ )
    def UpperCAmelCase__ ( self :str ) -> Optional[int]:
        # Adapt (config, pixel_values) into the common-test inputs dict.
        UpperCAmelCase = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase
            ) , (
                UpperCAmelCase
            ) ,
        ) = config_and_inputs
        UpperCAmelCase = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Flax ViT model test suite driven by the common Flax-model test mixin.

    NOTE(review): locals are machine-mangled to ``UpperCAmelCase``;
    ``FlaxViTModelTester`` referenced below is presumably the tester class
    defined above (here renamed ``A_``) — confirm the intended binding.
    """
    __UpperCamelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def UpperCAmelCase__ ( self :int ) -> None:
        # Build the shared model/config tester fixtures.
        UpperCAmelCase = FlaxViTModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
    def UpperCAmelCase__ ( self :Optional[Any] ) -> Tuple:
        self.config_tester.run_common_tests()
    def UpperCAmelCase__ ( self :Dict ) -> Tuple:
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )
    def UpperCAmelCase__ ( self :str ) -> Dict:
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )
    def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
        # Every model's forward signature must start with `pixel_values`.
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(lowercase_ )
            UpperCAmelCase = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase = [*signature.parameters.keys()]
            UpperCAmelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowercase_ )
    def UpperCAmelCase__ ( self :str ) -> Optional[int]:
        # Outputs must match between jitted and non-jitted execution.
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase = self._prepare_for_class(lowercase_ , lowercase_ )
                UpperCAmelCase = model_class(lowercase_ )
                @jax.jit
                def model_jitted(lowercase_ :Tuple , **lowercase_ :Dict ):
                    return model(pixel_values=lowercase_ , **lowercase_ )
                with self.subTest('JIT Enabled' ):
                    UpperCAmelCase = model_jitted(**lowercase_ ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        UpperCAmelCase = model_jitted(**lowercase_ ).to_tuple()
                self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
                for jitted_output, output in zip(lowercase_ , lowercase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def UpperCAmelCase__ ( self :List[Any] ) -> str:
        # Smoke-test loading the pretrained checkpoint and running a forward.
        for model_class_name in self.all_model_classes:
            UpperCAmelCase = model_class_name.from_pretrained('google/vit-base-patch16-224' )
            UpperCAmelCase = model(np.ones((1, 3, 2_24, 2_24) ) )
            self.assertIsNotNone(lowercase_ )
| 78 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Processor pairing a LayoutLMv2 image processor with a LayoutXLM
    tokenizer: optionally runs OCR, tokenizes words/boxes/labels, and attaches
    pixel values (and overflow-aligned images) to the encoding.

    NOTE(review): locals are machine-mangled to ``UpperCAmelCase`` — later
    lines read the intended names (``features``, ``encoded_inputs``,
    ``images_with_overflow``); restore distinct names before running.
    """
    __UpperCamelCase = ["""image_processor""", """tokenizer"""]
    __UpperCamelCase = """LayoutLMv2ImageProcessor"""
    __UpperCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__( self :Any , lowercase_ :int=None , lowercase_ :Union[str, Any]=None , **lowercase_ :Optional[Any] ) -> Dict:
        # Accept the deprecated `feature_extractor` kwarg as an alias.
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , lowercase_ , )
            UpperCAmelCase = kwargs.pop('feature_extractor' )
        UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(lowercase_ , lowercase_ )
    def __call__( self :str , lowercase_ :Optional[int] , lowercase_ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ :Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ :Optional[Union[List[int], List[List[int]]]] = None , lowercase_ :bool = True , lowercase_ :Union[bool, str, PaddingStrategy] = False , lowercase_ :Union[bool, str, TruncationStrategy] = None , lowercase_ :Optional[int] = None , lowercase_ :int = 0 , lowercase_ :Optional[int] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[bool] = None , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = False , lowercase_ :bool = True , lowercase_ :Optional[Union[str, TensorType]] = None , **lowercase_ :Any , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
        # first, apply the image processor
        UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(lowercase_ , lowercase_ ):
                UpperCAmelCase = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            UpperCAmelCase = features['words']
        UpperCAmelCase = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # add pixel values
        UpperCAmelCase = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            # Re-align images with overflowed samples (one image per sample).
            UpperCAmelCase = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] )
        UpperCAmelCase = images
        return encoded_inputs
    def UpperCAmelCase__ ( self :Dict , lowercase_ :List[Any] , lowercase_ :Any ) -> Optional[Any]:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        UpperCAmelCase = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(lowercase_ ) != len(lowercase_ ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f""" {len(lowercase_ )} and {len(lowercase_ )}""" )
        return images_with_overflow
    def UpperCAmelCase__ ( self :Any , *lowercase_ :int , **lowercase_ :Tuple ) -> Tuple:
        # Forward batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
    def UpperCAmelCase__ ( self :Any , *lowercase_ :List[Any] , **lowercase_ :Optional[int] ) -> Optional[Any]:
        # Forward single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )
    @property
    def UpperCAmelCase__ ( self :int ) -> Optional[int]:
        # Names of the model inputs this processor produces.
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def UpperCAmelCase__ ( self :int ) -> Dict:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
        return self.image_processor_class
    @property
    def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
        return self.image_processor
| 78 | 1 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 78 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Weighted directed graph stored as an adjacency list.

    ``self.graph`` maps each vertex ``u`` to a list of ``[w, v]`` pairs, one
    per outgoing edge ``u -> v`` with weight ``w``.
    """

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the edge ``u -> v`` with optional weight ``w``; duplicates are ignored."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the destination vertex also exists as a key
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every vertex of the graph."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge ``u -> v`` if present (no-op if ``u`` is unknown)."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search starting at ``s`` (first vertex when -2).

        Returns the list of visited vertices, stopping early if the optional
        destination ``d`` is reached.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` random vertices (10..10009 when ``c`` is -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from ``s`` (first vertex when -2); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing into ``u``."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological ordering of the component reachable from ``s``.

        Vertices are appended when fully explored, so the result lists sinks
        first (reverse finishing order is NOT applied here — this mirrors the
        original behavior).
        """
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the vertices that participate in some cycle (DFS back-edge scan)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back edge found: collect every vertex on the stack down to it
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return ``True`` as soon as a back edge is found while DFS-ing from the first vertex."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    """Weighted undirected graph stored as an adjacency list.

    Each edge is mirrored: adding ``u - v`` stores ``[w, v]`` under ``u`` and
    ``[w, u]`` under ``v``.
    """

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge ``u - v`` with optional weight ``w``; duplicates are ignored."""
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the undirected edge ``u - v`` from both adjacency lists."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (first vertex when -2).

        Returns visited vertices in order, stopping early at destination ``d``.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` random vertices (10..10009 when ``c`` is -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from ``s`` (first vertex when -2); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the vertices that participate in some cycle (DFS back-edge scan)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back edge found: collect every vertex on the stack down to it
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return ``True`` as soon as a back edge is found while DFS-ing from the first vertex."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every vertex of the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by ``dfs(s, e)``."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by ``bfs(s)``."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 78 | 1 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    """Arrow-based loader for JSON / JSON Lines files."""

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config options and return the dataset info."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Align a parsed table with the requested features (add missing columns, cast)."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield `(key, pa.Table)` pairs for every input JSON / JSON Lines file."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 78 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 78 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    """Builds tiny random DeBERTa configs and inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        # pipelines need a slightly larger vocabulary than the unit tests
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        # the loss must be a scalar tensor
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test-suite hookup for DeBERTa."""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the published microsoft/deberta-base weights."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 78 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ):
UpperCAmelCase = []
UpperCAmelCase = 0
for index, char in enumerate(lowercase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase = index + 1
elif index + 1 == len(lowercase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
    # When run as a script, verify the module's doctests.
    import doctest

    doctest.testmod()
| 78 | 1 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class A_ ( TestCasePlus ):
    """Tests for TRANSFORMERS_OFFLINE / offline-mode behaviour.

    Each test assembles a small python program from source snippets and runs it
    in a subprocess, because TRANSFORMERS_OFFLINE is read once at import time
    and cannot be toggled from inside the running interpreter.

    Fixes vs. the obfuscated original: the base class is restored to the
    imported `TestCasePlus` (the undefined `SCREAMING_SNAKE_CASE_` raised a
    NameError), env-var writes such as `env["TRANSFORMERS_OFFLINE"] = "1"` were
    lost into a throwaway local, `subprocess.run` received the undefined
    `lowercase_`, and all five methods shared one name so only the last
    survived; they are restored to distinct test names.
    """

    @require_torch
    def test_offline_mode( self ) -> Optional[Any]:
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )

    @require_torch
    def test_offline_mode_no_internet( self ) -> List[Any]:
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed: files are cached, so the flaky socket is never needed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )

    @require_torch
    def test_offline_mode_sharded_checkpoint( self ) -> Optional[int]:
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )

    @require_torch
    def test_offline_mode_pipeline_exception( self ) -> Any:
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        # task auto-inference needs the Hub, so it must fail loudly in offline mode
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )

    @require_torch
    def test_offline_model_dynamic_model( self ) -> int:
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
| 78 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging once at import time: timestamped INFO-level records
# that also carry the emitting process id (useful for multi-process training).
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
)
snake_case_ = logging.getLogger(__name__)
def _lowerCAmelCase ( lowercase_ ):
    """Dump the current git commit/branch info to `<lowercase_>/git_log.json`.

    Bug fixes: the repo object was discarded into a throwaway local (making the
    later `repo.*` reads NameErrors), the folder argument was passed as
    `search_parent_directories`, and the info dict was "dumped" into itself
    instead of into the open file handle.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
    }
    with open(os.path.join(lowercase_ , 'git_log.json' ) , 'w' ) as f:
        json.dump(repo_infos , f , indent=4 )
def _lowerCAmelCase ( lowercase_ ):
if params.n_gpu <= 0:
UpperCAmelCase = 0
UpperCAmelCase = -1
UpperCAmelCase = True
UpperCAmelCase = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
UpperCAmelCase = int(os.environ['WORLD_SIZE'] )
UpperCAmelCase = int(os.environ['N_GPU_NODE'] )
UpperCAmelCase = int(os.environ['RANK'] )
# number of nodes / node ID
UpperCAmelCase = params.world_size // params.n_gpu_per_node
UpperCAmelCase = params.global_rank // params.n_gpu_per_node
UpperCAmelCase = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
UpperCAmelCase = 1
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 1
UpperCAmelCase = 1
UpperCAmelCase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
UpperCAmelCase = params.node_id == 0 and params.local_rank == 0
UpperCAmelCase = params.n_nodes > 1
# summary
UpperCAmelCase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _lowerCAmelCase ( lowercase_ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 78 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A_ ( ABC ):
    """Abstract base class for CLI subcommands.

    Fixes vs. the obfuscated original: the base class is restored to the
    imported `ABC` (the undefined `SCREAMING_SNAKE_CASE_` raised a NameError),
    the `List[Any]`/`Any` annotations referencing un-imported typing names are
    removed, and the two colliding method names (both `UpperCAmelCase__`, so
    the second shadowed the first) are given distinct names.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand( lowercase_ :ArgumentParser ):
        """Register this command's arguments on the given parser."""
        raise NotImplementedError()

    @abstractmethod
    def run( self ):
        """Execute the command."""
        raise NotImplementedError()
| 78 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE(review): the obfuscated script assigned three bare strings to one
# throwaway name; the TensorRT execution-provider environment toggles below are
# restored from the upstream qdqbert ONNX Runtime benchmark -- confirm.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"  # enable TensorRT INT8 precision
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"  # use ORT-generated calibration table
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"  # cache built TensorRT engines

sess_opt = ort.SessionOptions()
# Graph optimizations are disabled so TensorRT sees the unmodified graph.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
# Bug fix: `np.intaa` does not exist; token/mask inputs are int64 tensors.
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 78 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class A_ ( PretrainedConfig ):
    """Configuration for a BEiT-style model (stores all model hyper-parameters).

    Fixes vs. the obfuscated original: the base class is restored to the
    imported `PretrainedConfig`, the `__init__` signature (which declared every
    parameter as `lowercase_` -- a duplicate-argument SyntaxError) is restored
    to the names the body assigns, and each value is stored on `self` instead
    of being discarded into a throwaway local.
    """

    # model identifier used by the auto-config machinery
    model_type = """beit"""

    def __init__(
        self ,
        vocab_size=81_92 ,
        hidden_size=7_68 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=30_72 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        image_size=2_24 ,
        patch_size=16 ,
        num_channels=3 ,
        use_mask_token=False ,
        use_absolute_position_embeddings=False ,
        use_relative_position_bias=False ,
        use_shared_relative_position_bias=False ,
        layer_scale_init_value=0.1 ,
        drop_path_rate=0.1 ,
        use_mean_pooling=True ,
        out_indices=[3, 5, 7, 11] ,  # mutable default kept for backward compatibility
        pool_scales=[1, 2, 3, 6] ,   # mutable default kept for backward compatibility
        use_auxiliary_head=True ,
        auxiliary_loss_weight=0.4 ,
        auxiliary_channels=2_56 ,
        auxiliary_num_convs=1 ,
        auxiliary_concat_input=False ,
        semantic_loss_ignore_index=2_55 ,
        **kwargs ,
    ) -> Any:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A_ ( OnnxConfig ):
    """ONNX export configuration for BEiT.

    Fixes vs. the obfuscated original: the base class is restored to the
    imported `OnnxConfig`, and the two properties (which shared one name, so
    the second shadowed the first) are restored to the `OnnxConfig` hook names.
    """

    # minimum torch version known to export this graph correctly
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        # Single image input in (batch, channels, height, width) order.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1E-4
| 78 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case_ = logging.get_logger(__name__)
class A_ ( BaseImageProcessor ):
    """BLIP-style image processor: optional RGB-convert, resize, rescale, normalize.

    Fixes vs. the obfuscated original: base class restored to the imported
    `BaseImageProcessor`; method signatures (every parameter named `lowercase_`,
    a duplicate-argument SyntaxError) restored to the names the bodies read;
    the four methods shared one name while the code calls `self.resize` /
    `self.rescale` / `self.normalize`, so the methods are restored to those
    names (and the entry point to `preprocess`, the `BaseImageProcessor` hook).
    """

    model_input_names = ["""pixel_values"""]

    def __init__(
        self ,
        do_resize = True ,
        size = None ,
        resample = PILImageResampling.BICUBIC ,
        do_rescale = True ,
        rescale_factor = 1 / 2_55 ,
        do_normalize = True ,
        image_mean = None ,
        image_std = None ,
        do_convert_rgb = True ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 3_84, 'width': 3_84}
        size = get_size_dict(size , default_to_square=True )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs ) -> np.ndarray:
        """Resize `image` to `size` ({"height": h, "width": w})."""
        size = get_size_dict(size , default_to_square=True )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        output_size = (size['height'], size['width'])
        # delegates to the module-level `resize` transform
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs ) -> np.ndarray:
        """Normalize `image` with channel-wise `mean` and `std`."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess(
        self ,
        images ,
        do_resize = None ,
        size = None ,
        resample = None ,
        do_rescale = None ,
        rescale_factor = None ,
        do_normalize = None ,
        image_mean = None ,
        image_std = None ,
        return_tensors = None ,
        do_convert_rgb = None ,
        data_format = ChannelDimension.FIRST ,
        **kwargs ,
    ) -> PIL.Image.Image:
        """Run the full pipeline; per-call arguments override instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=True )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        # Bug fix: the original condition parsed as
        # `(do_resize and size is None) or resample is None` due to precedence.
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        encoded_outputs = BatchFeature(data={'pixel_values': images} , tensor_type=return_tensors )
        return encoded_outputs
| 78 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( PretrainedConfig ):
    """Transformer-XL (`transfo-xl`) model configuration.

    Fixes vs. the obfuscated original: base class restored to the imported
    `PretrainedConfig`; the three class attributes shared one (name-mangled)
    name so only the last survived -- restored to the `PretrainedConfig`
    contract names; the `__init__` signature (all parameters `lowercase_`, a
    duplicate-argument SyntaxError) restored from the body's assignment order;
    values stored on `self`; the property is named `max_position_embeddings`,
    as proven by the `@max_position_embeddings.setter` decorator below it.
    """

    model_type = """transfo-xl"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self ,
        vocab_size=26_77_35 ,
        cutoffs=[2_00_00, 4_00_00, 20_00_00] ,  # mutable default kept for backward compatibility
        d_model=10_24 ,
        d_embed=10_24 ,
        n_head=16 ,
        d_head=64 ,
        d_inner=40_96 ,
        div_val=4 ,
        pre_lnorm=False ,
        n_layer=18 ,
        mem_len=16_00 ,
        clamp_len=10_00 ,
        same_length=True ,
        proj_share_all_but_first=True ,
        attn_type=0 ,
        sample_softmax=-1 ,
        adaptive=True ,
        dropout=0.1 ,
        dropatt=0.0 ,
        untie_r=True ,
        init="normal" ,
        init_range=0.01 ,
        proj_init_std=0.01 ,
        init_std=0.02 ,
        layer_norm_epsilon=1E-5 ,
        eos_token_id=0 ,
        **kwargs ,
    ) -> List[str]:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        # share all embedding/softmax projections except the first cluster
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )

    @property
    def max_position_embeddings( self ) -> Any:
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self , lowercase_ ) -> Tuple:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 78 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class A_ ( PretrainedConfig ):
    """Configuration for a BEiT-style model (stores all model hyper-parameters).

    Fixes vs. the obfuscated original: the base class is restored to the
    imported `PretrainedConfig`, the `__init__` signature (which declared every
    parameter as `lowercase_` -- a duplicate-argument SyntaxError) is restored
    to the names the body assigns, and each value is stored on `self` instead
    of being discarded into a throwaway local.
    """

    # model identifier used by the auto-config machinery
    model_type = """beit"""

    def __init__(
        self ,
        vocab_size=81_92 ,
        hidden_size=7_68 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=30_72 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        image_size=2_24 ,
        patch_size=16 ,
        num_channels=3 ,
        use_mask_token=False ,
        use_absolute_position_embeddings=False ,
        use_relative_position_bias=False ,
        use_shared_relative_position_bias=False ,
        layer_scale_init_value=0.1 ,
        drop_path_rate=0.1 ,
        use_mean_pooling=True ,
        out_indices=[3, 5, 7, 11] ,  # mutable default kept for backward compatibility
        pool_scales=[1, 2, 3, 6] ,   # mutable default kept for backward compatibility
        use_auxiliary_head=True ,
        auxiliary_loss_weight=0.4 ,
        auxiliary_channels=2_56 ,
        auxiliary_num_convs=1 ,
        auxiliary_concat_input=False ,
        semantic_loss_ignore_index=2_55 ,
        **kwargs ,
    ) -> Any:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A_ ( OnnxConfig ):
    """ONNX export configuration for BEiT.

    Fixes vs. the obfuscated original: the base class is restored to the
    imported `OnnxConfig`, and the two properties (which shared one name, so
    the second shadowed the first) are restored to the `OnnxConfig` hook names.
    """

    # minimum torch version known to export this graph correctly
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        # Single image input in (batch, channels, height, width) order.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1E-4
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ = """▁"""
snake_case_ = {"""vocab_file""": """spiece.model"""}
snake_case_ = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
snake_case_ = {
"""google/pegasus-xsum""": 512,
}
snake_case_ = logging.get_logger(__name__)
class A_ ( PreTrainedTokenizer ):
    """PEGASUS tokenizer backed by SentencePiece.

    Token ids 0..offset-1 are reserved special tokens (pad, eos, the two mask
    tokens and <unk_2>..<unk_102> pretraining placeholders); SentencePiece ids
    are shifted up by `offset`.

    Fixes vs. the obfuscated original: base class restored to the imported
    `PreTrainedTokenizer`; the class attributes shared one (name-mangled) name
    -- including the same value assigned twice -- and are restored to the
    tokenizer-contract names; the `__init__` signature (all parameters
    `lowercase_`, a duplicate-argument SyntaxError) restored; method names are
    restored to the hook names the code itself references
    (`self.vocab_size`, `self._special_token_mask`, ...).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self ,
        vocab_file ,
        pad_token="<pad>" ,
        eos_token="</s>" ,
        unk_token="<unk>" ,
        mask_token="<mask_2>" ,
        mask_token_sent="<mask_1>" ,
        additional_special_tokens=None ,
        offset=1_03 ,
        sp_model_kwargs = None ,
        **kwargs ,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(list )}, but is"""
                    f""" {type(additional_special_tokens )}""" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + self.offset

    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> Any:
        # the SentencePiece processor is not picklable; drop it and reload in __setstate__
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ) -> int:
        # special tokens live in the fixed low-id range, below the sp_model ids
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset

    def _convert_id_to_token( self , index ) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
        return token

    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def num_special_tokens_to_add( self , pair=False ) -> Any:
        # only EOS is appended by build_inputs_with_special_tokens
        return 1

    def _special_token_mask( self , seq ) -> int:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 78 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Import structure consumed by _LazyModule: maps submodule name -> public names.
# The configuration is always importable; framework buckets are added below
# only when the corresponding backend is installed.
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports; names match the strings declared
    # in _import_structure above (the original imported mismatched LongTa*
    # names from a nonexistent configuration_longta module).
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy framework
    # imports are deferred until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 78 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.