| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
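
# Worked example: the function effectively returns ceil(dim / scale_factor**2) * scale_factor
# for each dimension, i.e. the latent resolution the denoising loop below works with:
#   downscale_height_and_width(768, 768)  # -> (96, 96), since 768 is divisible by 8**2 = 64
#   downscale_height_and_width(500, 500)  # -> (64, 64), since ceil(500 / 64) = 8 and 8 * 8 = 64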
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latents
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `np` and `pil` are supported, not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
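
# A minimal usage sketch (repo and file names are illustrative placeholders):
#   hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="refs/convert/parquet")
# resolves to a dataset URL of the form
#   https://huggingface.co/datasets/<repo_id>/resolve/<url-encoded revision>/<path>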
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
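
# How the lazy module behaves (an illustrative note, not part of the original file):
# outside TYPE_CHECKING, sys.modules[__name__] is swapped for a _LazyModule, so
#   from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderConfig
# resolves the submodule on first attribute access instead of importing the
# torch/flax/tf backends eagerly; model classes are only registered when the
# corresponding backend is available.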
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the denominator d, up to `digit`, for which numerator/d has the longest
    recurring cycle in its decimal fraction part (cf. Project Euler problem 26).

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
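
# For example, with numerator 1 and divide_by_number 7, the running remainders are
# 1 -> 3 -> 2 -> 6 -> 4 -> 5 -> 1: the repeat of remainder 1 is detected with six
# remainders stored, so 1/7 has a recurring cycle of length 6.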
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                   predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                   predictions=[1, 2, 2, 0, 3, 3],\n        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                   predictions=[1, 2, 2, 0, 3, 3],\n        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
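
# A hypothetical invocation sketch (the script name and file paths are
# placeholders for illustration, not real files):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin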
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
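
# A minimal usage sketch (the checkpoint name matches the maps above; the
# question/title/text strings are made up for illustration):
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       padding=True,
#       return_tensors="pt",
#   )
#   # `decode_best_spans` can then rank answer spans from a DPRReader forward pass.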
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
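
# A minimal lookup sketch using the mappings built above (illustrative only):
#   _EXTENSION_TO_MODULE[".tsv"]          # -> ("csv", {"sep": "\t"})
#   _MODULE_TO_EXTENSIONS["imagefolder"]  # image extensions plus ".zip", per the lines above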
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase__ = {"""input_ids""": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("Queue emptied before 'n' was entered; tree input ended unexpectedly.")


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
node: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list: list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file) -> None:
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265, max_position_embeddings=512,
        encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16,
        decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        use_cache=True, is_encoder_decoder=True, activation_function="gelu",
        d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
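
# A minimal usage sketch (default values come straight from the signature above):
#   config = BlenderbotSmallConfig()
#   config.num_attention_heads  # -> 16, via the attribute_map alias for encoder_attention_heads
#   config.hidden_size          # -> 512, via the attribute_map alias for d_model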
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
def snake_case ( self : List[Any], lowerCamelCase : PreTrainedTokenizer, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional[TensorType] = None, )-> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Union[str, Any] =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
elif self.task == "causal-lm":
lowerCamelCase__ : Union[str, Any] =self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
else:
lowerCamelCase__ : List[Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
return common_inputs
def snake_case ( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Any )-> str:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Any =super()._flatten_past_key_values_(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase__ : List[str] =super(lowerCamelCase, self )._flatten_past_key_values_(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """A generic heap; pass a key function to order items by a derived score."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swap of two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be parent among the given index and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates given item value in heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes given item from heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts given item with given value in heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns top item of heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns top item of heap and removes it as well if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
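# Usage sketch (illustrative, not part of the original file): with key=lambda x: -x
# the heap surfaces the item with the largest value first. get_top()/extract_top()
# return the [item, scored_value] pairs exactly as stored in self.arr.
if __name__ == "__main__":
    demo_heap = Heap(key=lambda x: -x)  # behaves as a max-heap
    demo_heap.insert_item(5, 34)
    demo_heap.insert_item(6, 31)
    demo_heap.insert_item(7, 37)
    print(demo_heap.get_top())      # [7, -37]
    print(demo_heap.extract_top())  # [7, -37]
    demo_heap.update_item(6, 40)    # re-score item 6; the heap property is restored
    print(demo_heap.extract_top())  # [6, -40]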
def _a( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the alphanumeric fraction of the file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1- looking for keywords in the first few lines of the file.
    2- counting the occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and uncommon files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
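# Illustration (hypothetical toy input, not part of the pipeline): each heuristic
# returns a small dict that ds.map(preprocess, ...) below merges into the dataset.
#
#   example = {"content": "x = 1\ny = 2\nprint(x + y)\n"}
#   line_stats(example)           # {"line_mean": 7.33, "line_max": 12}
#   has_no_keywords(example)      # {"has_no_keywords": True}
#   has_few_assignments(example)  # {"has_few_assignments": True}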
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generates the sequence of prime numbers."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
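# Sanity checks (illustrative, doctest-style):
#   >>> is_prime(7), is_prime(9)
#   (True, False)
#   >>> solution(10)  # 2 + 3 + 5 + 7
#   17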
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Any = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
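# Usage sketch (illustrative): the defaults above describe the l1-sized variant,
# so a bare instantiation is enough to inspect the derived fields.
if __name__ == "__main__":
    config = EfficientFormerConfig()
    print(config.model_type)                    # "efficientformer"
    print(config.depths, config.hidden_sizes)   # [3, 2, 6, 4] [48, 96, 224, 448]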
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a__ : Union[str, Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between the rows of a and b."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Assign each RGB value in x to the index of its nearest cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
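# Example (illustrative): color_quantize maps each RGB pixel to the index of its
# nearest cluster under squared Euclidean distance.
#
#   pixels = np.random.rand(16, 3)            # 16 RGB values in [0, 1]
#   clusters = np.random.rand(8, 3)            # an 8-color palette
#   color_quantize(pixels, clusters).shape     # (16,) -- one palette id per pixel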
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
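# Usage sketch (hypothetical clusters and size; the class name above is an
# assumption inferred from the ImageGPT-style "input_ids" output):
if __name__ == "__main__":
    clusters = np.random.rand(16, 3) * 2 - 1  # toy palette in [-1, 1]
    processor = ImageGPTImageProcessor(clusters=clusters, size={"height": 8, "width": 8})
    image = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
    encoding = processor(image, return_tensors="np")
    print(encoding["input_ids"].shape)  # (1, 64): one cluster id per 8x8 pixel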
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of image embeddings so that they can be
    scaled to (and from) a normalized space.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
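# Round-trip sketch: with the default zero mean and unit std, unscale(scale(x))
# reproduces x exactly, which is a cheap sanity check for the two methods above.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=8)
    x = torch.randn(2, 8)
    assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)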
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A : List[str] = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """We will verify our results on an image of cute cats."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights to our PoolFormer structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A : Any = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached this point before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()`."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """A context manager that adds each keyword argument to `os.environ` and removes it when exiting."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Gets a pretty name from `obj`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merges two dictionaries."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Checks if a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
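# Example (illustrative): merge_dicts folds `source` into `destination` in place
# and returns it, recursing through nested dictionaries.
#
#   base = {"a": 1, "nested": {"x": 1}}
#   merge_dicts({"nested": {"y": 2}, "b": 3}, base)
#   # -> {"a": 1, "nested": {"x": 1, "y": 2}, "b": 3}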
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
@property
def _snake_case ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , __A = 1000 , __A = 0.0001 , __A = 0.02 , __A = "linear" , __A = None , __A = "fixed_small" , __A = True , __A = "epsilon" , __A = jnp.floataa , ):
"""simple docstring"""
lowerCamelCase : Any = dtype
def _snake_case ( self , __A = None ):
"""simple docstring"""
if common is None:
lowerCamelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCamelCase : Any = jnp.array(1.0 , dtype=self.dtype )
lowerCamelCase : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__A , init_noise_sigma=__A , timesteps=__A , )
def _snake_case ( self , __A , __A , __A = None ):
"""simple docstring"""
return sample
def _snake_case ( self , __A , __A , __A = () ):
"""simple docstring"""
lowerCamelCase : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCamelCase : str = (jnp.arange(0 , __A ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__A , timesteps=__A , )
def _snake_case ( self , __A , __A , __A=None , __A=None ):
"""simple docstring"""
lowerCamelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCamelCase : Union[str, Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase : List[Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCamelCase : int = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase : List[Any] = jnp.clip(__A , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase : Tuple = jnp.log(jnp.clip(__A , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase : str = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase : str = variance
lowerCamelCase : str = state.common.betas[t]
lowerCamelCase : Union[str, Any] = (predicted_variance + 1) / 2
lowerCamelCase : Any = frac * max_log + (1 - frac) * min_log
return variance
def _snake_case ( self , __A , __A , __A , __A , __A = None , __A = True , ):
"""simple docstring"""
lowerCamelCase : int = timestep
if key is None:
lowerCamelCase : str = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCamelCase , lowerCamelCase : int = jnp.split(__A , sample.shape[1] , axis=1 )
else:
lowerCamelCase : int = None
# 1. compute alphas, betas
lowerCamelCase : List[str] = state.common.alphas_cumprod[t]
lowerCamelCase : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCamelCase : Optional[int] = 1 - alpha_prod_t
lowerCamelCase : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCamelCase : Dict = model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCamelCase : Dict = jnp.clip(__A , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase : Any = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCamelCase : Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCamelCase : Dict = jax.random.split(__A , num=1 )
lowerCamelCase : int = jax.random.normal(__A , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__A , __A , predicted_variance=__A ) ** 0.5) * noise
lowerCamelCase : Any = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCamelCase : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__A , state=__A )
def _snake_case ( self , __A , __A , __A , __A , ):
"""simple docstring"""
return add_noise_common(state.common , __A , __A , __A )
def _snake_case ( self , __A , __A , __A , __A , ):
"""simple docstring"""
return get_velocity_common(state.common , __A , __A , __A )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
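# Usage sketch (downloads a public checkpoint from the Hub; the image can be any
# PIL image; checkpoint name is an assumption):
if __name__ == "__main__":
    import requests
    from PIL import Image

    processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    encoding = processor(image, "a photo of two cats", return_tensors="pt")
    print(sorted(encoding.keys()))  # attention_mask, input_ids, pixel_mask, pixel_values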
'''simple docstring'''
import sys
import turtle
def get_mid(p1, p2) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
__a = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
__a = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
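# Usage sketch (illustrative): the defaults give a BERT-base-sized encoder plus
# the XPath-specific embedding sizes used for markup trees.
if __name__ == "__main__":
    config = MarkupLMConfig()
    print(config.max_depth, config.xpath_unit_hidden_size)  # 50 32
    print(config.hidden_size, config.num_hidden_layers)     # 768 12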
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
UpperCamelCase__ = TypeVar('''KT''')
UpperCamelCase__ = TypeVar('''VT''')
class lowerCamelCase_ ( Generic[KT, VT] ):
def __init__( self : int , _A : KT | str = "root" , _A : VT | None = None ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = key
UpperCAmelCase__ : Optional[int] = value
UpperCAmelCase__ : Tuple = []
def __repr__( self : Any ):
'''simple docstring'''
return f"""Node({self.key}: {self.value})"""
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return len(self.forward )
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level geometrically: P(level = k) = p^(k-1) * (1 - p)."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return the node holding `key` (or None) together with the vector of
        the rightmost nodes to the left of the searched key, one per level."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search() -> None:
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests() -> None:
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main() -> None:
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 181
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
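

if __name__ == "__main__":
    # Hedged usage sketch: downloads the `microsoft/layoutxlm-base` checkpoint
    # and, because apply_ocr defaults to True, needs Tesseract installed.
    from PIL import Image

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.new("RGB", (224, 224), "white")
    encoding = processor(image, return_tensors="pt")
    print(sorted(encoding.keys()))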
| 67
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast BARThez tokenizer backed by HuggingFace tokenizers, based on SentencePiece BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
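

if __name__ == "__main__":
    # Hedged usage sketch: downloads one of the checkpoints listed in the map
    # above; the resulting ids depend on that vocabulary.
    tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
    print(tokenizer("Bonjour le monde").input_ids)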
| 170
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search over `array[left:right]`; return -1 if absent."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list; return the index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted list; return the index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
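

def _demo_ternary_search() -> None:
    # Minimal non-interactive sketch of both variants; call it by hand or from
    # a test runner. Index 3 holds the value 8 in the sorted collection below.
    collection = [0, 2, 4, 8, 16, 32, 64]
    assert ite_ternary_search(collection, 8) == 3
    assert rec_ternary_search(0, len(collection) - 1, collection, 8) == 3
    assert ite_ternary_search(collection, 5) == -1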
| 170
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
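
# Usage note: with the `_LazyModule` pattern above, importing the package is
# cheap; heavy submodules are only loaded when a name such as
# `MobileBertModel` is first accessed, e.g.:
#
#     from transformers import MobileBertConfig  # resolved lazily on access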
| 53
|
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]


# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
__A = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
__A = """SGLCPQWZHKXAREONTFBVIYJUDM"""
__A = """HVSICLTYKQUBXDWAJZOMFGPREN"""
__A = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
__A = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
__A = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks whether the given values can be used for the `enigma` function."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Creates the symmetric symbol-swap dictionary from a plugboard string."""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Emulates the Enigma machine; running the output back through with the
    same settings recovers the original (uppercased) message."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
__A = """This is my Python script that emulates the Enigma machine from WWII."""
__A = (1, 1, 1)
__A = """pictures"""
__A = (rotora, rotora, rotora)
__A = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 293
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Stochastic scheduler tailored to variance-expanding models (Karras et al., 2022).
    """

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op: this scheduler does not rescale model inputs."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like "churn": move from sigma to a higher sigma_hat
        by adding noise, following eq. (5) of Karras et al. (2022)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Propagate the sample one step with an Euler update."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Correct the predicted sample with a second-order (Heun-style) update."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
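

if __name__ == "__main__":
    # Hedged sampling sketch with a zero "denoiser" standing in for a UNet; a
    # real pipeline (e.g. diffusers' KarrasVePipeline) supplies the model.
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        model_output = torch.zeros_like(sample_hat)  # stand-in denoiser
        sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
    print(sample.shape)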
| 160
|
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))
    def forward(self, W_query, W_supports):
        """Score each query token as a potential entity start/end token."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 160
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Load the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original weights into our PoolFormer structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1_000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1_000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
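
# Usage sketch (paths and the script name are placeholders):
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path /path/to/output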
| 95
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
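

if __name__ == "__main__":
    # Hedged sketch: needs a local Spark session; `SparkSession` is imported
    # here only for the demo and is not used by the reader above.
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], ["text"])
    ds = SparkDatasetReader(df, streaming=False).read()
    print(ds)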
| 95
| 1
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 355
|
"""simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the list from both ends to find the index of `key`.
    Returns -1 if the key is not found.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
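

def _demo_search() -> None:
    # Minimal sketch: the recursion narrows in from both ends of the list.
    assert search([1, 0, -1, 2, 9], 0) == 1
    assert search([1, 0, -1, 2, 9], 7) == -1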
| 95
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    """Configuration class for ImageGPT models."""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_12 + 1,  # add one for start of sequence (sos) token
        n_positions=32 * 32,  # the input image is 32 x 32
        n_embd=5_12,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy inputs to provide to the ONNX exporter."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
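

if __name__ == "__main__":
    # Minimal sketch: a default config is built locally; vocab_size is the 512
    # pixel clusters plus one start-of-sequence token.
    config = ImageGPTConfig()
    print(config.vocab_size, config.n_positions)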
| 185
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""microsoft/speecht5_asr""": 1024,
"""microsoft/speecht5_tts""": 1024,
"""microsoft/speecht5_vc""": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of (sub-word) tokens."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
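

if __name__ == "__main__":
    # Hedged usage sketch: downloads one of the checkpoints listed in the map
    # above; the resulting ids depend on that SentencePiece model.
    tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
    print(tokenizer("Hello world").input_ids)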
| 185
| 1
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """
    Creates a pair of train/eval `DataLoader`s for the GLUE MRPC dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
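# A minimal launch sketch; the script and YAML file names below are hypothetical
# examples of how this file is typically run under an `accelerate` config that
# enables the DeepSpeed plugin:
#
#   accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./results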
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
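# A minimal usage sketch; "bert-base-uncased" is just a common public checkpoint
# chosen for illustration:
#
#   from transformers import pipeline
#   fill_masker = pipeline("fill-mask", model="bert-base-uncased")
#   fill_masker("Paris is the [MASK] of France.", top_k=3)
#   # -> list of dicts with "score", "token", "token_str" and "sequence" keys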
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
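# Example invocation (the checkpoint path is a hypothetical placeholder):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch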
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
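# Sanity check for the output above: starting from |000>, the QFT yields a
# uniform superposition (the controlled-phase gates only change phases, not
# magnitudes), so the 10_000 shots should split roughly evenly across all
# 2**3 = 8 basis states, i.e. about 1_250 counts each.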
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
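# A small hedged sketch of the FiLM mechanism used throughout the decoder above:
# a conditioning embedding is projected to per-channel (scale, shift) pairs that
# modulate the hidden states. The dimensions below are arbitrary illustration values.
#
#   film = TaFiLMLayer(in_features=8, out_features=4)
#   x = torch.randn(2, 3, 4)       # (batch, seq_len, d_model)
#   cond = torch.randn(2, 1, 8)    # conditioning embedding
#   out = film(x, cond)            # broadcasts to the same shape as x: (2, 3, 4)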
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
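# Note on the `_LazyModule` pattern above: the heavy framework-specific
# submodules are only imported on first attribute access, so e.g.
#
#   from transformers import DeiTConfig   # cheap: pulls in the config module only
#
# stays fast, while touching `DeiTModel` triggers the torch-backed import lazily.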
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
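# A minimal usage sketch ("facebook/bart-base" is one of the public checkpoints
# listed in the maps above):
#
#   tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   enc = tokenizer("Hello world!", return_tensors="pt")
#   # build_inputs_with_special_tokens wraps a single sequence as <s> ... </s>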
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Counts square laminae (hollow square rings) that can be formed with up to
    t_limit tiles, and returns how many tile counts admit between 1 and n_limit
    distinct lamina shapes.
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F'''{solution() = }''')
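# Worked example for intuition: a 9x9 outer square with a centred 3x3 hole is a
# valid lamina using 81 - 9 = 72 tiles, so the loop above increments count[72]
# once for that shape.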
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Read a weighted edge list and return an adjacency dict: node -> [[neighbour, distance], ...]."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
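# The input file is expected to be a weighted edge list with one edge per line
# ("node node distance"). A hypothetical 3-node instance saved as
# tabu_test_data.txt:
#
#   a b 20
#   a c 18
#   b c 10
#
#   python tabu_search.py -f tabu_test_data.txt -i 4 -s 3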
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
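# Usage sketch: integrate y' = y with y(0) = 1 over [0, 1]. The exact value at
# x = 1 is e ~ 2.71828; with step_size = 0.01 explicit Euler gives ~2.7048.
#
#   ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(ys[-1])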
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
"""simple docstring"""
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
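# Why the two prints above agree: by Fermat's little theorem, for a prime p and
# b not divisible by p, b**(p - 2) % p is the modular inverse of b, so division
# modulo p can be replaced by a multiplication with binary_exponentiation(b, p - 2, p).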
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'{solution() = }')
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
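# Quick sketch: building a toy configuration (the sizes below are arbitrary
# illustration values, not a released checkpoint):
#
#   config = BloomConfig(vocab_size=1000, hidden_size=64, n_layer=2, n_head=8)
#   config.num_hidden_layers  # -> 2, resolved through `attribute_map` above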
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Plays FizzBuzz starting at `number` for `iterations` rounds and returns the output string."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
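# Example: fizz_buzz(1, 15) returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (note the trailing space appended after every item).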
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str ):
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : List[str] , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ):
if audio_length_in_s is None:
lowercase_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate
lowercase_ : Dict = audio_length_in_s * self.unet.config.sample_rate
lowercase_ : Any = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowercase_ : List[Any] = int(lowercase_ )
if sample_size % down_scale_factor != 0:
lowercase_ : int = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
""" process.""" )
lowercase_ : Any = int(lowercase_ )
lowercase_ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowercase_ : List[str] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase_ : Any = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
# set step values
self.scheduler.set_timesteps(lowercase_ , device=audio.device )
lowercase_ : Optional[Any] = self.scheduler.timesteps.to(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase_ : Dict = self.unet(lowercase_ , lowercase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowercase_ : List[str] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowercase_ : str = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowercase_ : Union[str, Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowercase_ )
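# A minimal usage sketch for the pipeline above (the checkpoint name is an
# assumption; any DanceDiffusion-style unconditional audio checkpoint works):
#
#   import scipy.io.wavfile
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#   output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
#   scipy.io.wavfile.write("sample.wav", pipe.unet.config.sample_rate, output.audios[0].T)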
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
a : List[str] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class a ( unittest.TestCase ):
@classmethod
    def A_ ( cls : str ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def A_ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
    def A_ ( self : Tuple ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=F"{key} not identical" )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-model-flax''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir , repo_id='''test-model-flax''' , push_to_hub=True , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=F"{key} not identical" )
    def A_ ( self : Optional[Any] ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=F"{key} not identical" )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=True , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=F"{key} not identical" )
def check_models_equal( model_a, model_b ) -> bool:
    '''simple docstring'''
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
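# A quick sanity check of the helper above (illustrative):
#
#   config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5,
#                       num_attention_heads=4, intermediate_size=37)
#   model = FlaxBertModel(config)
#   assert check_models_equal(model, model)  # a model always equals itself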
@require_flax
class a ( unittest.TestCase ):
    def A_ ( self : Union[str, Any] ):
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        model = FlaxBertModel(config )
        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) )
            with self.assertRaises(OSError ):
                FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def A_ ( self : Union[str, Any] ):
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        model = FlaxBertModel(config )
        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size='''10KB''' )
            with self.assertRaises(OSError ):
                FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def A_ ( self : str ):
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(OSError ):
            FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
    def A_ ( self : Tuple ):
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(OSError ):
            FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case_ = AutoencoderKL
snake_case_ = "sample"
snake_case_ = 1e-2
@property
def A_ ( self : Dict ):
snake_case_ = 4
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase_ )
return {"sample": image}
@property
def A_ ( self : List[Any] ):
return (3, 32, 32)
@property
def A_ ( self : Dict ):
return (3, 32, 32)
def A_ ( self : Union[str, Any] ):
snake_case_ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def A_ ( self : Any ):
pass
def A_ ( self : str ):
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def A_ ( self : Dict ):
# enable deterministic behavior for gradient checkpointing
snake_case_ ,snake_case_ = self.prepare_init_args_and_inputs_for_common()
snake_case_ = self.model_class(**lowercase_ )
model.to(lowercase_ )
assert not model.is_gradient_checkpointing and model.training
snake_case_ = model(**lowercase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
snake_case_ = torch.randn_like(lowercase_ )
snake_case_ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
snake_case_ = self.model_class(**lowercase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowercase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
snake_case_ = model_a(**lowercase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
snake_case_ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
snake_case_ = dict(model.named_parameters() )
snake_case_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def A_ ( self : Tuple ):
snake_case_ ,snake_case_ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(lowercase_ )
snake_case_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A_ ( self : Tuple ):
snake_case_ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
snake_case_ = model.to(lowercase_ )
model.eval()
if torch_device == "mps":
snake_case_ = torch.manual_seed(0 )
else:
snake_case_ = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ = image.to(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ , sample_posterior=lowercase_ , generator=lowercase_ ).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
snake_case_ = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
snake_case_ = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
snake_case_ = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1e-2 ) )
@slow
class a ( unittest.TestCase ):
    def get_file_format( self : Union[str, Any] , seed , shape ):
        return F"gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"
    def tearDown( self : Any ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image( self : Dict , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ):
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
    def get_sd_vae_model( self : Any , model_id="CompVis/stable-diffusion-v1-4" , fpaa=False ):
        revision = '''fp16''' if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id , subfolder='''vae''' , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()
        return model
    def get_generator( self : Any , seed=0 ):
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Tuple ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ )
snake_case_ = self.get_generator(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ , generator=lowercase_ , sample_posterior=lowercase_ ).sample
assert sample.shape == image.shape
snake_case_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
snake_case_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowercase_ , lowercase_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Dict ):
snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ )
snake_case_ = self.get_sd_image(lowercase_ , fpaa=lowercase_ )
snake_case_ = self.get_generator(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ , generator=lowercase_ , sample_posterior=lowercase_ ).sample
assert sample.shape == image.shape
snake_case_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
snake_case_ = torch.tensor(lowercase_ )
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ ).sample
assert sample.shape == image.shape
snake_case_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
snake_case_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowercase_ , lowercase_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Dict , lowercase_ : Tuple , lowercase_ : Optional[int] ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
snake_case_ = sample[-1, -2:, :2, -2:].flatten().cpu()
snake_case_ = torch.tensor(lowercase_ )
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[Any] ):
snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ )
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) , fpaa=lowercase_ )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
snake_case_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
snake_case_ = torch.tensor(lowercase_ )
assert torch_all_close(lowercase_ , lowercase_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def A_ ( self : Optional[Any] , lowercase_ : List[str] ):
snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ )
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) , fpaa=lowercase_ )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def A_ ( self : Optional[Any] , lowercase_ : Any ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def A_ ( self : str , lowercase_ : Optional[int] , lowercase_ : Tuple ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ )
snake_case_ = self.get_generator(lowercase_ )
with torch.no_grad():
snake_case_ = model.encode(lowercase_ ).latent_dist
snake_case_ = dist.sample(generator=lowercase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
snake_case_ = sample[0, -1, -3:, -3:].flatten().cpu()
snake_case_ = torch.tensor(lowercase_ )
snake_case_ = 3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(lowercase_ , lowercase_ , atol=lowercase_ )
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """simple docstring"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """simple docstring"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """simple docstring"""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('''Principal Component Analysis computed''')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='''%(message)s''', force=True)
        logging.error('''Dataset empty''')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """simple docstring"""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('''Linear Discriminant Analysis computed''')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='''%(message)s''', force=True)
        logging.error('''Dataset empty''')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """simple docstring"""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                '''Did not raise AssertionError for dimensions > classes''')
        assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """simple docstring"""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
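# A minimal usage sketch for the routines above (data is illustrative):
#
#   features = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
#                        [2.0, 3.0, 4.0, 5.0, 6.0]])  # shape: (n_features, n_samples)
#   projected = principal_component_analysis(features, dimensions=1)
#   # `projected` has shape (1, n_samples): each sample expressed on the top component.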
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a) -> None:
    """simple docstring"""
    min_val = min(a )  # min() finds the minimum value
    max_val = max(a )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    """simple docstring"""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('''Sorted order is:''', ''' '''.join([str(x ) for x in a] ) )
if __name__ == "__main__":
main()
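# Pigeonhole sort runs in O(n + range) time and O(range) extra space, where
# range = max(a) - min(a) + 1, so it is only attractive when the key range is
# small relative to n. A quick check (illustrative):
#
#   data = [8, 3, 2, 7, 4, 6, 8]
#   pigeonhole_sort(data)
#   assert data == [2, 3, 4, 6, 7, 8, 8]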
"""simple docstring"""
class __lowercase :
'''simple docstring'''
    def __init__( self , set_counts ):
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self , src , dst ):
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self , disj_set ):
        if self.parents[disj_set] == disj_set:
            return disj_set
        # Path compression: point directly at the root.
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
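# A minimal usage sketch for the union-find structure above (illustrative):
#
#   uf = __lowercase([1, 1, 1])   # three singleton sets, one element each
#   uf.merge(0, 1)                # union by rank with path compression
#   print(uf.max_set)             # -> 2, the size of the largest merged set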
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ) -> dict:
    sd = torch.load(checkpoint_path , map_location='''cpu''')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location='''cpu''')['''model''']
    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''' , '''.q_proj.''')
            k_name = key.replace('''.qkv_proj.''' , '''.k_proj.''')
            v_name = key.replace('''.qkv_proj.''' , '''.v_proj.''')
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value , depth // 3 , dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ) -> None:
    sd = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(sd)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
A = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
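# Example invocation of this script (file and folder paths are illustrative):
#
#   python convert_opt_checkpoint.py --fairseq_path /path/to/restored.pt \
#       --pytorch_dump_folder_path ./opt-hf --hf_config facebook/opt-350m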
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
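# With the `_LazyModule` indirection above, importing the package stays cheap:
# the heavy torch/tf/flax submodules listed in `_import_structure` are only
# imported the first time one of their attributes is accessed, e.g.:
#
#   from transformers import RoFormerModel  # triggers the torch import lazily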
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="""utf-8""")
    ciphertext = [int(number) for number in data.strip().split(""",""")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'{solution() = }')
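# The attack above works because XOR is self-inverting: if c = p ^ k then
# p = c ^ k, so re-applying the 3-character lowercase key recovers the
# plaintext, and candidate keys are rejected as soon as a decoded byte falls
# outside the printable-ASCII whitelist. A tiny demonstration (illustrative):
#
#   secret = [b ^ k for b, k in zip(b"the cat", cycle(b"god"))]
#   assert try_key(secret, tuple(b"god")) == "the cat"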
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name , data_dir , max_source_length=1_024 , max_target_length=1_024 , consider_target=False , **kwargs
) -> None:
    '''simple docstring'''
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="""train""" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["""input_ids"""].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["""labels"""].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="""val""" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
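# Example invocation via python-fire (arguments are illustrative; extra flags
# are forwarded to the dataset constructor through **kwargs):
#
#   python save_len_file.py t5-small /path/to/data_dir --consider_target True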
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _a ):
def __init__( self : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : str ):
        warnings.warn(
            """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use FlavaImageProcessor instead.""" , FutureWarning , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCamelCase__ = get_logger(__name__)
class A :
def __init__(self : List[str] , __UpperCAmelCase : Optional[str] = None ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = (
os.path.join(__UpperCAmelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
UpperCAmelCase__ = Extractor
def lowercase_ (self : str , __UpperCAmelCase : str ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
UpperCAmelCase__ = os.path.abspath(__UpperCAmelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(__UpperCAmelCase ) )
def lowercase_ (self : int , __UpperCAmelCase : str , __UpperCAmelCase : bool ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(__UpperCAmelCase ) and not (os.path.isdir(__UpperCAmelCase ) and os.listdir(__UpperCAmelCase ))
)
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : bool = False ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.extractor.infer_extractor_format(__UpperCAmelCase )
if not extractor_format:
return input_path
UpperCAmelCase__ = self._get_output_path(__UpperCAmelCase )
if self._do_extract(__UpperCAmelCase , __UpperCAmelCase ):
self.extractor.extract(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return output_path
class A ( UpperCAmelCase_ ):
@classmethod
@abstractmethod
def lowercase_ (cls : Dict , __UpperCAmelCase : Union[Path, str] , **__UpperCAmelCase : Dict ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
...
class A ( UpperCAmelCase_ , UpperCAmelCase_ ):
__UpperCAmelCase : List[bytes] = []
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
with open(__UpperCAmelCase , "rb" ) as f:
return f.read(__UpperCAmelCase )
@classmethod
def lowercase_ (cls : Any , __UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : bytes = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
UpperCAmelCase__ = max(len(__UpperCAmelCase ) for cls_magic_number in cls.magic_numbers )
try:
UpperCAmelCase__ = cls.read_magic_number(__UpperCAmelCase , __UpperCAmelCase )
except OSError:
return False
return any(magic_number.startswith(__UpperCAmelCase ) for cls_magic_number in cls.magic_numbers )
class A ( UpperCAmelCase_ ):
@classmethod
def lowercase_ (cls : Optional[int] , __UpperCAmelCase : Union[Path, str] , **__UpperCAmelCase : Tuple ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(__UpperCAmelCase )
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ) -> int:
"""simple docstring"""
def resolved(__UpperCAmelCase : str ) -> str:
return os.path.realpath(os.path.abspath(__UpperCAmelCase ) )
def badpath(__UpperCAmelCase : str , __UpperCAmelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ).startswith(__UpperCAmelCase )
def badlink(__UpperCAmelCase : List[str] , __UpperCAmelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
UpperCAmelCase__ = resolved(os.path.join(__UpperCAmelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__UpperCAmelCase )
UpperCAmelCase__ = resolved(__UpperCAmelCase )
for finfo in members:
if badpath(finfo.name , __UpperCAmelCase ):
logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(__UpperCAmelCase , __UpperCAmelCase ):
logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(__UpperCAmelCase , __UpperCAmelCase ):
logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
UpperCAmelCase__ = tarfile.open(__UpperCAmelCase )
tar_file.extractall(__UpperCAmelCase , members=TarExtractor.safemembers(__UpperCAmelCase , __UpperCAmelCase ) )
tar_file.close()
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Optional[int] = [b'\x1F\x8B']
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(__UpperCAmelCase , "rb" ) as gzip_file:
with open(__UpperCAmelCase , "wb" ) as extracted_file:
shutil.copyfileobj(__UpperCAmelCase , __UpperCAmelCase )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Tuple = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def lowercase_ (cls : Optional[Any] , __UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(__UpperCAmelCase , magic_number=__UpperCAmelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__UpperCAmelCase , "rb" ) as fp:
UpperCAmelCase__ = _EndRecData(__UpperCAmelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
UpperCAmelCase__ = fp.read(__UpperCAmelCase ) # CD is where we expect it to be
if len(__UpperCAmelCase ) == sizeCentralDir:
UpperCAmelCase__ = struct.unpack(__UpperCAmelCase , __UpperCAmelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
with zipfile.ZipFile(__UpperCAmelCase , "r" ) as zip_file:
zip_file.extractall(__UpperCAmelCase )
zip_file.close()
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : str = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(__UpperCAmelCase ) as compressed_file:
with open(__UpperCAmelCase , "wb" ) as extracted_file:
shutil.copyfileobj(__UpperCAmelCase , __UpperCAmelCase )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Union[str, Any] = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
UpperCAmelCase__ = rarfile.RarFile(__UpperCAmelCase )
rf.extractall(__UpperCAmelCase )
rf.close()
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : str = [b'\x28\xb5\x2F\xFD']
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
UpperCAmelCase__ = zstd.ZstdDecompressor()
with open(__UpperCAmelCase , "rb" ) as ifh, open(__UpperCAmelCase , "wb" ) as ofh:
dctx.copy_stream(__UpperCAmelCase , __UpperCAmelCase )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Tuple = [b'\x42\x5A\x68']
@staticmethod
    def lowercase_ (input_path: Union[Path, str] , output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        with bz2.open(input_path , "rb" ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Any = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
    def lowercase_ (input_path: Union[Path, str] , output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr" )
        import py7zr
        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , "r" ) as archive:
            archive.extractall(output_path )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Optional[Any] = [b'\x04\x22\x4D\x18']
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(__UpperCAmelCase , "rb" ) as compressed_file:
with open(__UpperCAmelCase , "wb" ) as extracted_file:
shutil.copyfileobj(__UpperCAmelCase , __UpperCAmelCase )
class A :
    # Zip is checked last because its detection has false positives: tar or gzip archives may be wrongly detected as zip.
__UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowercase_ (cls : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return max(
len(__UpperCAmelCase )
for extractor in cls.extractors.values()
if issubclass(__UpperCAmelCase , __UpperCAmelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(__UpperCAmelCase , magic_number_length=__UpperCAmelCase )
except OSError:
return b""
@classmethod
def lowercase_ (cls : Union[str, Any] , __UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=__UpperCAmelCase , )
UpperCAmelCase__ = cls.infer_extractor_format(__UpperCAmelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowercase_ (cls : str , __UpperCAmelCase : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
UpperCAmelCase__ = cls._get_magic_number_max_length()
UpperCAmelCase__ = cls._read_magic_number(__UpperCAmelCase , __UpperCAmelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__UpperCAmelCase , magic_number=__UpperCAmelCase ):
return extractor_format
@classmethod
def lowercase_ (cls : Dict , __UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(__UpperCAmelCase ) , exist_ok=__UpperCAmelCase )
# Prevent parallel extractions
UpperCAmelCase__ = str(Path(__UpperCAmelCase ).with_suffix(".lock" ) )
with FileLock(__UpperCAmelCase ):
shutil.rmtree(__UpperCAmelCase , ignore_errors=__UpperCAmelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__UpperCAmelCase , __UpperCAmelCase ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=__UpperCAmelCase , )
UpperCAmelCase__ = extractor if extractor != "deprecated" else extractor_format
else:
UpperCAmelCase__ = cls.extractors[extractor_format]
return extractor.extract(__UpperCAmelCase , __UpperCAmelCase )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=__UpperCAmelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__UpperCAmelCase ):
return extractor.extract(__UpperCAmelCase , __UpperCAmelCase )
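# A minimal usage sketch (names follow the un-obfuscated `datasets` utility this
# module mirrors, `ExtractManager`; treat them as assumptions):
#
#   manager = ExtractManager(cache_dir="/tmp/extracted_cache")
#   extracted_path = manager.extract("archive.tar.gz")
#   # Re-running returns the cached extraction path instead of re-extracting.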
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def create_rename_keys( config, base_model=False ) -> list:
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict, config, base_model=False ) -> None:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key( dct, old, new ) -> None:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ) -> Image.Image:
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name, pytorch_dump_folder_path ) -> None:
    '''simple docstring'''
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("tiny" ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small" ):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base" ):
        pass
    elif deit_name[4:].startswith("large" ):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size )
    encoding = image_processor(images=prepare_img(), return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
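# Example invocation of this script (the dump folder is illustrative):
#
#   python convert_deit_checkpoint.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224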
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE :int = 16
SCREAMING_SNAKE_CASE :Any = 32
def UpperCAmelCase ( a_ , a_ = 1_6 , a_ = "bert-base-cased" ) -> Union[str, Any]:
"""simple docstring"""
__A = AutoTokenizer.from_pretrained(a_ )
__A = load_dataset("glue" , "mrpc" )
def tokenize_function(a_ ):
# max_length=None => use the model max length (it's actually the default)
__A = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__A = datasets.map(
a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=a_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__A = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(a_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a_ , padding="max_length" , max_length=1_2_8 , return_tensors="pt" )
return tokenizer.pad(a_ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
__A = DataLoader(
tokenized_datasets["train"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
__A = DataLoader(
tokenized_datasets["validation"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
return train_dataloader, eval_dataloader
def UpperCAmelCase ( a_ , a_ ) -> List[str]:
"""simple docstring"""
__A = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__A = config["lr"]
__A = int(config["num_epochs"] )
__A = int(config["seed"] )
__A = int(config["batch_size"] )
__A = args.model_name_or_path
set_seed(a_ )
__A , __A = get_dataloaders(a_ , a_ , a_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__A = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_ )
# Instantiate optimizer
__A = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__A = optimizer_cls(params=model.parameters() , lr=a_ )
if accelerator.state.deepspeed_plugin is not None:
__A = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
__A = 1
__A = (len(a_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__A = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , )
else:
__A = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__A , __A , __A , __A , __A = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ )
# We need to keep track of how many total steps we have iterated over
__A = 0
# We also need to keep track of the stating epoch so files are named properly
__A = 0
# Now we train the model
__A = evaluate.load("glue" , "mrpc" )
__A = 0
__A = {}
for epoch in range(a_ , a_ ):
model.train()
for step, batch in enumerate(a_ ):
__A = model(**a_ )
__A = outputs.loss
__A = loss / gradient_accumulation_steps
accelerator.backward(a_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__A = 0
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__A = model(**a_ )
__A = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__A , __A = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(a_ ) - 1:
__A = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__A = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=a_ , references=a_ , )
__A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , a_ )
__A = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
__A = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(a_ , a_ )
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
__A = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=a_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=a_ , )
parser.add_argument(
"--output_dir" , type=a_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=a_ , default=a_ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=a_ , default=3 , help="Number of train epochs." , )
__A = parser.parse_args()
__A = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 4_2, "batch_size": 1_6}
training_function(a_ , a_ )
if __name__ == "__main__":
main()
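# Illustrative launch command (script and DeepSpeed config filenames are assumptions). The script
# expects to run under `accelerate launch`; when the DeepSpeed config defines an optimizer or
# scheduler, the Dummy* placeholders above are substituted automatically:
#   accelerate launch --config_file ds_zero2_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .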
| 15
|
def UpperCAmelCase ( a_ ) -> list:
"""simple docstring"""
if len(a_ ) <= 1:
return [tuple(a_ )]
__A = []
def generate(a_ , a_ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , a_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
__A , __A = arr[k - 1], arr[i]
else: # k is odd
__A , __A = arr[k - 1], arr[0]
generate(k - 1 , a_ )
generate(len(a_ ) , a_ )
return res
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :int = input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE :Dict = [int(item) for item in user_input.split(',')]
print(heaps(arr))
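# Worked example (follows from the recursion above): Heap's algorithm emits each of the
# 3! = 6 permutations of [1, 2, 3], each obtained from the previous one by a single swap:
#   heaps([1, 2, 3]) == [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]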
| 15
| 1
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowerCamelCase ( a_ ) -> List[str]:
lowerCAmelCase_ = args.pruning_method
lowerCAmelCase_ = args.threshold
lowerCAmelCase_ = args.model_name_or_path.rstrip('/' )
lowerCAmelCase_ = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
lowerCAmelCase_ = torch.load(os.path.join(a_ , 'pytorch_model.bin' ) )
lowerCAmelCase_ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase_ = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase_ = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
lowerCAmelCase_ = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
lowerCAmelCase_ = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ )
lowerCAmelCase_ = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase_ = name[:-6]
lowerCAmelCase_ = model[F'''{prefix_}mask_scores''']
lowerCAmelCase_ = TopKBinarizer.apply(a_ , a_ )
lowerCAmelCase_ = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase_ = name[:-6]
lowerCAmelCase_ = model[F'''{prefix_}mask_scores''']
lowerCAmelCase_ = ThresholdBinarizer.apply(a_ , a_ , a_ )
lowerCAmelCase_ = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase_ = name[:-6]
lowerCAmelCase_ = model[F'''{prefix_}mask_scores''']
lowerCAmelCase_ , lowerCAmelCase_ = -0.1, 1.1
lowerCAmelCase_ = torch.sigmoid(a_ )
lowerCAmelCase_ = s * (r - l) + l
lowerCAmelCase_ = s_bar.clamp(min=0.0 , max=1.0 )
lowerCAmelCase_ = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
lowerCAmelCase_ = os.path.join(
os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}''' )
if not os.path.isdir(a_ ):
shutil.copytree(a_ , a_ )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(a_ , os.path.join(a_ , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
lowerCamelCase_ = parser.parse_args()
main(args)
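# Illustrative invocation (script filename and model path are placeholders): keep the top 10% of
# weights of a movement-pruned checkpoint and write `bertarized_<model>` next to the original:
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model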
| 368
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase ( a_ ) -> Any:
lowerCAmelCase_ = tmp_path / 'file.csv'
lowerCAmelCase_ = textwrap.dedent(
        '\n            header1,header2\n            1,2\n            10,20\n            ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> List[Any]:
lowerCAmelCase_ = tmp_path / 'malformed_file.csv'
lowerCAmelCase_ = textwrap.dedent(
        '\n            header1,header2\n            1,2\n            10,20,\n            ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ , a_ ) -> List[str]:
lowerCAmelCase_ = tmp_path / 'csv_with_image.csv'
lowerCAmelCase_ = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> int:
lowerCAmelCase_ = tmp_path / 'csv_with_label.csv'
lowerCAmelCase_ = textwrap.dedent(
        '\n            label\n            good\n            bad\n            good\n            ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> Union[str, Any]:
lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv'
lowerCAmelCase_ = textwrap.dedent(
        '\n            int_list\n            1 2 3\n            4 5 6\n            7 8 9\n            ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]:
lowerCAmelCase_ = Csv()
lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(a_ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(a_ ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase ( a_ ) -> Optional[Any]:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
lowerCAmelCase_ = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase ( a_ ) -> int:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1:]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
lowerCAmelCase_ = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def lowerCamelCase ( a_ ) -> Union[str, Any]:
    lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i ) for i in x.split()]} )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
lowerCAmelCase_ = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 14
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :int = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE :List[str] = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
SCREAMING_SNAKE_CASE :int = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE :str = 0
SCREAMING_SNAKE_CASE :Dict = 1
SCREAMING_SNAKE_CASE :Tuple = 2
SCREAMING_SNAKE_CASE :int = 3
SCREAMING_SNAKE_CASE :Optional[int] = 4
class __lowerCAmelCase ( snake_case_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = 'left'
def __init__( self : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict=False , _lowerCAmelCase : str=True , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : List[Any]="</s>" , _lowerCAmelCase : List[str]="<unk>" , _lowerCAmelCase : Any="<sep>" , _lowerCAmelCase : str="<pad>" , _lowerCAmelCase : Any="<cls>" , _lowerCAmelCase : Any="<mask>" , _lowerCAmelCase : Tuple=["<eop>", "<eod>"] , _lowerCAmelCase : List[str] = None , **_lowerCAmelCase : str , ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
snake_case_ = 3
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> str:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Union[str, Any] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self : str , _lowerCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
if self.remove_space:
snake_case_ = " ".join(inputs.strip().split() )
else:
snake_case_ = inputs
snake_case_ = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
snake_case_ = unicodedata.normalize("NFKD" , _lowerCAmelCase )
snake_case_ = "".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] )
if self.do_lower_case:
snake_case_ = outputs.lower()
return outputs
def lowerCAmelCase__ ( self : Optional[Any] , _lowerCAmelCase : Any ) -> List[str]:
"""simple docstring"""
snake_case_ = self.preprocess_text(_lowerCAmelCase )
snake_case_ = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
snake_case_ = []
for piece in pieces:
if len(_lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
snake_case_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ = cur_pieces[1:]
else:
snake_case_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowerCAmelCase )
else:
new_pieces.append(_lowerCAmelCase )
return new_pieces
def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.PieceToId(_lowerCAmelCase )
def lowerCAmelCase__ ( self : Tuple , _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(_lowerCAmelCase )
def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : int ) -> Tuple:
"""simple docstring"""
snake_case_ = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip()
return out_string
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict = False , _lowerCAmelCase : Any = None , _lowerCAmelCase : Any = True , **_lowerCAmelCase : List[Any] , ) -> str:
"""simple docstring"""
snake_case_ = kwargs.pop("use_source_tokenizer" , _lowerCAmelCase )
snake_case_ = self.convert_ids_to_tokens(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case_ = []
snake_case_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCAmelCase ) )
snake_case_ = []
sub_texts.append(_lowerCAmelCase )
else:
current_sub_text.append(_lowerCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
snake_case_ = "".join(_lowerCAmelCase )
snake_case_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case_ = self.clean_up_tokenization(_lowerCAmelCase )
return clean_text
else:
return text
def lowerCAmelCase__ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] = None ) -> List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : str = None , _lowerCAmelCase : int = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is not None:
return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1]
return ([0] * len(_lowerCAmelCase )) + [1, 1]
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple = None ) -> List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Dict = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
_lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , "wb" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
| 159
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
__A =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__A =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case_ )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
        default=20 , metadata={'help': 'The total number of n-best predictions to generate.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
lowerCamelCase_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[i]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 19
| 0
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
UpperCamelCase = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _A ( lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
F'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
F' reinstalling {pkg}.' )
if not ops[op](version.parse(lowerCAmelCase_ ) , version.parse(lowerCAmelCase_ ) ):
raise ImportError(
F'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' )
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
lowerCAmelCase__ = F'\n{hint}' if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$" , lowerCAmelCase_ ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = requirement, None, None
else:
lowerCAmelCase__ = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , lowerCAmelCase_ )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
F' got {requirement}' )
lowerCAmelCase__ , lowerCAmelCase__ = match[0]
lowerCAmelCase__ = want_full.split("," ) # there could be multiple requirements
lowerCAmelCase__ = {}
for w in want_range:
lowerCAmelCase__ = re.findall(r"^([\s!=<>]{1,2})(.+)" , lowerCAmelCase_ )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
F' but got {requirement}' )
lowerCAmelCase__ , lowerCAmelCase__ = match[0]
lowerCAmelCase__ = want_ver
if op not in ops:
raise ValueError(F'{requirement}: need one of {list(ops.keys() )}, but got {op}' )
# special case
if pkg == "python":
lowerCAmelCase__ = ".".join([str(lowerCAmelCase_ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return
# check if any version is installed
try:
lowerCAmelCase__ = importlib.metadata.version(lowerCAmelCase_ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'The \'{requirement}\' distribution was not found and is required by this application. {hint}' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _A ( lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(lowerCAmelCase_ , lowerCAmelCase_ )
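# Illustrative usage (requirement strings are examples only; `require_version` is the name the
# recursive call above uses for the parser/checker function):
#   require_version("numpy>=1.17,<2.0")  # comma-separated range: each op/version pair is checked
#   require_version("python>=3.7.0")     # "python" is special-cased against sys.version_info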
| 366
|
from statistics import mean, stdev
def _A ( lowerCAmelCase_ : list , lowerCAmelCase_ : int = 3 ):
"""simple docstring"""
lowerCAmelCase__ = min(lowerCAmelCase_ )
lowerCAmelCase__ = max(lowerCAmelCase_ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , lowerCAmelCase_ ) for x in data]
def _A ( lowerCAmelCase_ : list , lowerCAmelCase_ : int = 3 ):
"""simple docstring"""
lowerCAmelCase__ = mean(lowerCAmelCase_ )
lowerCAmelCase__ = stdev(lowerCAmelCase_ )
# standardize data
return [round((x - mu) / (sigma) , lowerCAmelCase_ ) for x in data]
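# Worked example (values follow directly from the two formulas above):
#   min-max rescaling of [2, 4, 6] -> [0.0, 0.5, 1.0]   (x_min=2, x_max=6)
#   z-score of [2, 4, 6]           -> [-1.0, 0.0, 1.0]  (mean=4, sample stdev=2.0)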
| 221
| 0
|
"""simple docstring"""
import os
def _snake_case ( UpperCAmelCase_ : str = "matrix.txt" ):
with open(os.path.join(os.path.dirname(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) as in_file:
A__ = in_file.read()
    A__ = [[int(cell ) for cell in row.split(""",""" )] for row in data.strip().splitlines()]
A__ = [[0 for cell in row] for row in grid]
A__ = len(grid[0] )
A__ = [[0 for i in range(UpperCAmelCase_ )] for j in range(UpperCAmelCase_ )]
A__ = grid[0][0]
for i in range(1 , UpperCAmelCase_ ):
A__ = grid[0][i] + dp[0][i - 1]
for i in range(1 , UpperCAmelCase_ ):
A__ = grid[i][0] + dp[i - 1][0]
for i in range(1 , UpperCAmelCase_ ):
for j in range(1 , UpperCAmelCase_ ):
A__ = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 335
|
"""simple docstring"""
class a :
"""simple docstring"""
def __init__( self: Dict ):
"""simple docstring"""
A__ = {}
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase , """ -> """ , """ -> """.join([str(UpperCamelCase ) for j in self.vertex[i]] ) )
def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase )
else:
# else make a new vertex
A__ = [to_vertex]
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
                self.dfs_recursive(i , visited )
def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: list ):
"""simple docstring"""
A__ = True
print(UpperCamelCase , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 335
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__lowerCAmelCase : List[Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase ( _lowerCamelCase : str ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_lowerCamelCase ):
return ext
raise Exception(
F"Unable to determine file format from file extension {path}. "
F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def UpperCamelCase ( _lowerCamelCase : int ):
A__ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
A__ = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
A__ = PipelineDataFormat.from_str(
format=_lowerCamelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_lowerCamelCase , _lowerCamelCase )
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , lowercase_ :Pipeline , lowercase_ :PipelineDataFormat )-> str:
A__ = nlp
A__ = reader
@staticmethod
def UpperCAmelCase_ ( lowercase_ :ArgumentParser )-> Optional[Any]:
A__ = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=lowercase_ )
def UpperCAmelCase_ ( self :Dict )-> Optional[Any]:
A__, A__ = self._nlp, []
for entry in self._reader:
A__ = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
outputs.append(lowercase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
A__ = self._reader.save_binary(lowercase_ )
logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}" )
else:
self._reader.save(lowercase_ )
| 353
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
def UpperCamelCase ( _lowerCamelCase : str ):
A__ = torch.load(_lowerCamelCase , map_location="cpu" )
if "model" in sd.keys():
A__ = torch.load(_lowerCamelCase , map_location="cpu" )["model"]
# pop unnecessary weights
A__ = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCamelCase )
A__ = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
A__ = sd.pop(_lowerCamelCase )
A__ = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
A__ = sd[key]
# We split QKV in separate Q,K,V
A__ = key.replace(".qkv_proj." , ".q_proj." )
A__ = key.replace(".qkv_proj." , ".k_proj." )
A__ = key.replace(".qkv_proj." , ".v_proj." )
A__ = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
A__, A__, A__ = torch.split(_lowerCamelCase , depth // 3 , dim=0 )
A__ = q
A__ = k
A__ = v
del sd[key]
return sd
@torch.no_grad()
def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : Dict=None ):
A__ = load_checkpoint(_lowerCamelCase )
if config is not None:
A__ = OPTConfig.from_pretrained(_lowerCamelCase )
else:
A__ = OPTConfig()
A__ = OPTModel(_lowerCamelCase ).half().eval()
model.load_state_dict(_lowerCamelCase )
# Check results
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
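# Illustrative invocation (script filename and paths are placeholders):
#   python convert_opt_to_pytorch.py --fairseq_path ./opt-350m/checkpoint.pt \
#       --pytorch_dump_folder_path ./opt-350m-hf --hf_config facebook/opt-350m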
| 123
| 0
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
__A = """path-to-your-trained-model"""
__A = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
__A = """A photo of sks dog in a bucket"""
__A = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 293
|
"""simple docstring"""
from __future__ import annotations
__A = tuple[int, int, int]
__A = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__A = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
__A = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
__A = """FOBHMDKEXQNRAULPGSJVTYICZW"""
__A = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
__A = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
__A = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
__A = """SGLCPQWZHKXAREONTFBVIYJUDM"""
__A = """HVSICLTYKQUBXDWAJZOMFGPREN"""
__A = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
__A = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
__A = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
"""simple docstring"""
if (unique_rotsel := len(set(_SCREAMING_SNAKE_CASE ) )) < 3:
lowerCAmelCase__ :Union[str, Any] = F"Please use 3 unique rotors (not {unique_rotsel})"
raise Exception(_SCREAMING_SNAKE_CASE )
# Checks if rotor positions are valid
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = rotpos
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Tuple = F"First rotor position is not within range of 1..26 ({rotorposa}"
raise ValueError(_SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = F"Second rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Union[str, Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_SCREAMING_SNAKE_CASE )
# Validates string and returns dict
lowerCAmelCase__ :int = _plugboard(_SCREAMING_SNAKE_CASE )
return rotpos, rotsel, pbdict
def __A (_SCREAMING_SNAKE_CASE ) ->dict[str, str]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = F"Plugboard setting isn't type string ({type(_SCREAMING_SNAKE_CASE )})"
raise TypeError(_SCREAMING_SNAKE_CASE )
elif len(_SCREAMING_SNAKE_CASE ) % 2 != 0:
lowerCAmelCase__ :str = F"Odd number of symbols ({len(_SCREAMING_SNAKE_CASE )})"
raise Exception(_SCREAMING_SNAKE_CASE )
elif pbstring == "":
return {}
    pbstring = pbstring.replace(' ' , '' )
# Checks if all characters are unique
lowerCAmelCase__ :Any = set()
for i in pbstring:
if i not in abc:
lowerCAmelCase__ :Any = F"'{i}' not in list of symbols"
raise Exception(_SCREAMING_SNAKE_CASE )
elif i in tmppbl:
lowerCAmelCase__ :Dict = F"Duplicate symbol ({i})"
raise Exception(_SCREAMING_SNAKE_CASE )
else:
tmppbl.add(_SCREAMING_SNAKE_CASE )
del tmppbl
# Created the dictionary
lowerCAmelCase__ :List[Any] = {}
for j in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 , 2 ):
lowerCAmelCase__ :Optional[int] = pbstring[j + 1]
lowerCAmelCase__ :Union[str, Any] = pbstring[j]
return pb
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , _SCREAMING_SNAKE_CASE = "" , ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Tuple = text.upper()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = _validator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , plugb.upper() )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = rotor_position
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowerCAmelCase__ :Dict = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowerCAmelCase__ :Dict = plugboard[symbol]
# rotor ra --------------------------
lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
lowerCAmelCase__ :str = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# rotor rb --------------------------
lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
lowerCAmelCase__ :int = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# rotor rc --------------------------
lowerCAmelCase__ :str = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
lowerCAmelCase__ :Optional[Any] = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowerCAmelCase__ :str = reflector[symbol]
# 2nd rotors
lowerCAmelCase__ :Tuple = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
lowerCAmelCase__ :Optional[int] = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
lowerCAmelCase__ :Any = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowerCAmelCase__ :Union[str, Any] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = 0
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :List[Any] = 0
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = """This is my Python script that emulates the Enigma machine from WWII."""
__A = (1, 1, 1)
__A = """pictures"""
__A = (rotora, rotora, rotora)
__A = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 293
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a ( __UpperCamelCase ):
def A ( self : Optional[Any] ):
lowerCAmelCase_ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase , """embed_dim""" ) )
self.parent.assertTrue(hasattr(UpperCAmelCase , """num_heads""" ) )
class __a :
def __init__( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : Dict=64 , UpperCAmelCase : int=3 , UpperCAmelCase : Optional[int]=[16, 48, 96] , UpperCAmelCase : Union[str, Any]=[1, 3, 6] , UpperCAmelCase : Optional[Any]=[1, 2, 10] , UpperCAmelCase : str=[7, 3, 3] , UpperCAmelCase : Optional[Any]=[4, 2, 2] , UpperCAmelCase : Any=[2, 1, 1] , UpperCAmelCase : List[Any]=[2, 2, 2] , UpperCAmelCase : List[str]=[False, False, True] , UpperCAmelCase : Dict=[0.0, 0.0, 0.0] , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : List[str]=1e-1_2 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=2 , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Optional[int] = batch_size
lowerCAmelCase_ : Any = image_size
lowerCAmelCase_ : Union[str, Any] = patch_sizes
lowerCAmelCase_ : Any = patch_stride
lowerCAmelCase_ : str = patch_padding
lowerCAmelCase_ : str = is_training
lowerCAmelCase_ : Tuple = use_labels
lowerCAmelCase_ : Tuple = num_labels
lowerCAmelCase_ : Optional[Any] = num_channels
lowerCAmelCase_ : Optional[Any] = embed_dim
lowerCAmelCase_ : int = num_heads
lowerCAmelCase_ : str = stride_kv
lowerCAmelCase_ : Union[str, Any] = depth
lowerCAmelCase_ : Optional[Any] = cls_token
lowerCAmelCase_ : List[Any] = attention_drop_rate
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : List[str] = layer_norm_eps
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : int = None
if self.use_labels:
# create a random int32 tensor of given shape
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def A ( self : str ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def A ( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Optional[int] = TFCvtModel(config=UpperCAmelCase )
lowerCAmelCase_ : Tuple = model(UpperCAmelCase , training=UpperCAmelCase )
lowerCAmelCase_ : int = (self.image_size, self.image_size)
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
lowerCAmelCase_ : str = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
lowerCAmelCase_ : Union[str, Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def A ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : List[Any] = self.num_labels
lowerCAmelCase_ : Any = TFCvtForImageClassification(UpperCAmelCase )
lowerCAmelCase_ : Any = model(UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : int ):
lowerCAmelCase_ : Any = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = config_and_inputs
lowerCAmelCase_ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Optional[int] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__snake_case : Any = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
__snake_case : List[Any] = False
__snake_case : Dict = False
__snake_case : Optional[Any] = False
__snake_case : Optional[Any] = False
__snake_case : Optional[int] = False
def A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[int] = TFCvtModelTester(self )
lowerCAmelCase_ : Optional[int] = TFCvtConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A ( self : Tuple ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def A ( self : Optional[Any] ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def A ( self : List[str] ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def A ( self : List[Any] ):
pass
@unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n by n spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
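# Sanity check (illustrative): the Project Euler 28 statement gives 101 as the
# diagonal sum of a 5 by 5 spiral, and indeed solution(5) == 101.
assert solution(5) == 101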
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter in direct form I."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] using the stored coefficients and history."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the history buffers and store the newest sample/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
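# Minimal usage sketch (illustrative): a 2nd-order filter with pass-through
# coefficients b = [1, 0, 0], a = [1, 0, 0] leaves the signal unchanged.
if __name__ == "__main__":
    pass_through = IIRFilter(2)
    pass_through.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
    assert pass_through.process(0.5) == 0.5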
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
"""
A* search on a small 2D grid world.
"""
import numpy as np


class Cell:
    """
    A cell in the world, with a position, a parent pointer and the
    g/h/f values used by A*.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the valid neighbouring cells of `cell` (8-connected)."""
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from `start` to `goal`; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # skip cells that have already been expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # skip if an equally good (or better) copy is already queued
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
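# Illustrative note: with a unit step cost and the squared-Euclidean heuristic
# above, the search strongly favours diagonal moves, so the printed grid
# typically marks the main diagonal (0, 0) -> (1, 1) -> ... -> (4, 4);
# np.argmin tie-breaking may select an equivalent-cost route.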
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use the input text to compute a segmentation mask of the region to repaint
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
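# Hypothetical usage sketch (checkpoint ids and variable names below are
# illustrative assumptions, not part of the original file):
#
#   seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   sd = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
#   pipe = TextInpainting(
#       segmentation_model=seg_model, segmentation_processor=seg_processor,
#       vae=sd.vae, text_encoder=sd.text_encoder, tokenizer=sd.tokenizer,
#       unet=sd.unet, scheduler=sd.scheduler,
#       safety_checker=sd.safety_checker, feature_extractor=sd.feature_extractor,
#   )
#   result = pipe(prompt="a red couch", image=init_image, text="the old sofa")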
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
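# Illustrative invocation (the test path is an assumption): the skipif guard
# above means these tests only run when TEST_SAGEMAKER is set, e.g.
#   TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker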
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
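# Minimal usage sketch (BertConfig is an illustrative choice): a model's test
# case instantiates the tester with its config class and runs the battery.
#
#   from transformers import BertConfig
#   config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#   config_tester.run_common_tests()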
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    sorted_freq_to_letter = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
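# Illustrative usage: english_freq_match_score returns 0-12, counting how many
# of a message's six most and six least frequent letters match typical English
# (ETAOIN... / ...VKJXQZ); ordinary prose scores high, gibberish low, e.g.
#   english_freq_match_score("Alan Mathison Turing was an English mathematician.")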
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # NOTE: the model-specific class name was not recoverable from this
    # snippet; "ImageProcessor" is a stand-in.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
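# Illustrative sketch (the model/processor pairing is an assumption): turning
# raw segmentation logits into per-image label maps at original resolutions.
#
#   inputs = image_processor(images=image, return_tensors="pt")
#   outputs = segmentation_model(**inputs)
#   maps = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )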
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
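# Illustrative note: with the _LazyModule indirection above, an import such as
#   from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
# resolves the submodule (and its torch dependency) only on first attribute
# access rather than at package import time.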
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily let the feature extractor pad the mel spectrograms instead of raw audio
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
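# Illustrative round trip for the processor above (a sketch; "microsoft/speecht5_tts" is a public
# checkpoint assumed here, and `waveform` stands in for one second of 16 kHz mono audio):
if __name__ == "__main__":
    import numpy as np

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    waveform = np.zeros(16000, dtype=np.float32)
    # text becomes encoder `input_ids`, the target audio becomes decoder `labels`
    batch = processor(text="Hello world", audio_target=waveform, sampling_rate=16000, return_tensors="pt")
    print(sorted(batch.keys()))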
| 165
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
a__ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
a__ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
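# Illustrative inference with the classification head above (a sketch; "facebook/regnet-y-040" is
# the checkpoint already referenced in the docstrings, and the input is random):
if __name__ == "__main__":
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(pixel_values).logits  # shape (1, num_labels)
    print(model.config.id2label[int(logits.argmax(-1))])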
| 317
| 0
|
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def display(tree):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree):
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main():  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
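# Expected output for the tree built above (illustrative): node 5 has a left child but no right
# child, so is_full_binary_tree(tree) prints False, and the longest root-to-leaf paths
# (1 -> 2 -> 5 -> 6 and 1 -> 3 -> 7 -> 8) give a depth_of_tree(tree) of 4.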
| 365
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
def SCREAMING_SNAKE_CASE ( self: Any , **_lowerCAmelCase: Optional[Any] ):
lowercase :List[Any] = {
"num_train_timesteps": 10_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**_lowerCAmelCase )
return config
def SCREAMING_SNAKE_CASE ( self: str , **_lowerCAmelCase: Any ):
lowercase :Optional[int] = self.scheduler_classes[0]
lowercase :Dict = self.get_scheduler_config(**_lowerCAmelCase )
lowercase :List[str] = scheduler_class(**_lowerCAmelCase )
lowercase , lowercase :str = 10, 0.0
lowercase :List[Any] = self.dummy_model()
lowercase :int = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for t in scheduler.timesteps:
lowercase :Optional[int] = model(_lowerCAmelCase , _lowerCAmelCase )
lowercase :Dict = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowerCAmelCase )
lowercase :Optional[Any] = self.scheduler_classes[0]
lowercase :List[str] = self.get_scheduler_config(steps_offset=1 )
lowercase :Optional[int] = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def SCREAMING_SNAKE_CASE ( self: Tuple ):
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Dict ):
self.check_over_configs(thresholding=_lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self: str ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_lowerCAmelCase , eta=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :Dict = self.scheduler_classes[0]
lowercase :Tuple = self.get_scheduler_config()
lowercase :Optional[Any] = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :Union[str, Any] = self.scheduler_classes[0]
lowercase :Union[str, Any] = self.get_scheduler_config()
lowercase :Union[str, Any] = scheduler_class(**_lowerCAmelCase )
lowercase , lowercase :Union[str, Any] = 10, 0.0
scheduler.set_timesteps(_lowerCAmelCase )
lowercase :Dict = self.dummy_model()
lowercase :Dict = self.dummy_sample_deter
lowercase :Union[str, Any] = self.dummy_sample_deter + 0.1
lowercase :int = self.dummy_sample_deter - 0.1
lowercase :Dict = samplea.shape[0]
lowercase :Tuple = torch.stack([samplea, samplea, samplea] , dim=0 )
lowercase :Optional[Any] = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase )
lowercase :Union[str, Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowercase :Optional[int] = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowerCAmelCase )
lowercase :int = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :Optional[int] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
lowercase :int = self.full_loop()
lowercase :Optional[int] = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :Any = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :Dict = self.full_loop(prediction_type="v_prediction" )
lowercase :int = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
# We specify different beta, so that the first alpha is 0.99
lowercase :List[Any] = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
lowercase :List[Any] = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: Any ):
# We specify different beta, so that the first alpha is 0.99
lowercase :Tuple = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
lowercase :str = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :List[str] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 158
| 0
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
def A__ ( self , snake_case_ , snake_case_ ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__lowerCAmelCase = mode.value
__lowerCAmelCase = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
__lowerCAmelCase = 1
__lowerCAmelCase = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
__lowerCAmelCase = []
__lowerCAmelCase = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
__lowerCAmelCase = []
__lowerCAmelCase = []
else:
__lowerCAmelCase = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
__lowerCAmelCase = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__lowerCAmelCase = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for \'%s\'.""" , line.split()[0] )
def A__ ( self , snake_case_ ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
__lowerCAmelCase = f.read().splitlines()
if "O" not in labels:
__lowerCAmelCase = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)
def A__ ( self , snake_case_ ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
__lowerCAmelCase = f.read().splitlines()
if "O" not in labels:
__lowerCAmelCase = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
def A__ ( self , snake_case_ , snake_case_ ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__lowerCAmelCase = mode.value
__lowerCAmelCase = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
__lowerCAmelCase = 1
__lowerCAmelCase = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCAmelCase__ ):
__lowerCAmelCase = []
__lowerCAmelCase = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
return examples
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
__lowerCAmelCase = 0
for sentence in parse_incr(UpperCAmelCase__ ):
__lowerCAmelCase = preds_list[example_id]
__lowerCAmelCase = """"""
for token in sentence:
out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCAmelCase__ )
example_id += 1
def A__ ( self , snake_case_ ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 301
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy solution to the fractional knapsack problem: take items in decreasing
    value/weight ratio, splitting the last item if it does not fit."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
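# Worked example for the greedy selection above (illustrative): with values [60, 100, 120],
# weights [10, 20, 30] and capacity 50, the value/weight ratios are 6, 5 and 4, so the first two
# items are taken whole and two thirds of the last one fit, for a total value of 240.
if __name__ == "__main__":
    best, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(best)       # 240.0
    print(fractions)  # [1, 1, 0.6666666666666666]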
| 14
| 0
|
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
def _a (self , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**_lowerCamelCase )
return config
def _a (self , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = self.scheduler_classes[0]
UpperCAmelCase__ : List[str] = self.get_scheduler_config(**_lowerCamelCase )
UpperCAmelCase__ : Tuple = scheduler_class(**_lowerCamelCase )
UpperCAmelCase__ : str = 10, 0.0
UpperCAmelCase__ : Tuple = self.dummy_model()
UpperCAmelCase__ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCamelCase )
for t in scheduler.timesteps:
UpperCAmelCase__ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : str = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
return sample
def _a (self ):
"""simple docstring"""
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowerCamelCase )
UpperCAmelCase__ : str = self.scheduler_classes[0]
UpperCAmelCase__ : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def _a (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , )
def _a (self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=_lowerCamelCase , num_inference_steps=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_lowerCamelCase , eta=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.scheduler_classes[0]
UpperCAmelCase__ : Dict = self.get_scheduler_config()
UpperCAmelCase__ : Any = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = self.scheduler_classes[0]
UpperCAmelCase__ : str = self.get_scheduler_config()
UpperCAmelCase__ : int = scheduler_class(**_lowerCamelCase )
UpperCAmelCase__ : List[str] = 10, 0.0
scheduler.set_timesteps(_lowerCamelCase )
UpperCAmelCase__ : Tuple = self.dummy_model()
UpperCAmelCase__ : Optional[Any] = self.dummy_sample_deter
UpperCAmelCase__ : str = self.dummy_sample_deter + 0.1
UpperCAmelCase__ : Any = self.dummy_sample_deter - 0.1
UpperCAmelCase__ : Tuple = samplea.shape[0]
UpperCAmelCase__ : Dict = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCAmelCase__ : int = torch.arange(_lowerCamelCase )[0:3, None].repeat(1 , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCAmelCase__ : int = scheduler.batch_step_no_noise(_lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowerCamelCase )
UpperCAmelCase__ : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
UpperCAmelCase__ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.full_loop()
UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(_lowerCamelCase ) )
UpperCAmelCase__ : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = self.full_loop(prediction_type="""v_prediction""" )
UpperCAmelCase__ : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
UpperCAmelCase__ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.01 )
UpperCAmelCase__ : List[str] = torch.sum(torch.abs(_lowerCamelCase ) )
UpperCAmelCase__ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.01 )
UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(_lowerCamelCase ) )
UpperCAmelCase__ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 361
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
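# Example invocation of the conversion script above (illustrative; the script file name and the
# .pth checkpoint path are hypothetical):
#
#     python convert_glpn_to_pytorch.py \
#         --checkpoint_path ./glpn_kitti.pth \
#         --pytorch_dump_folder_path ./glpn-kitti \
#         --model_name glpn-kitti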
| 166
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
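# Illustrative sentence-pair encoding with the fast tokenizer above (a sketch; "albert-base-v2"
# is a public checkpoint assumed here):
if __name__ == "__main__":
    tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    encoding = tokenizer("first sentence", "second sentence")
    # token_type_ids are 0 for `[CLS] A [SEP]` and 1 for `B [SEP]`, as built above
    print(encoding["input_ids"])
    print(encoding["token_type_ids"])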
| 302
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = (
AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase )
if isinstance(__lowercase , __lowercase )
else mask_token
)
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # Token type ids are 0 for the first sequence and 1 for the (optional) second one.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 302
| 1
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k +/- 1, so only those divisors need checking
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return `n` together with every left- and right-truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: the first and last three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` primes that stay prime under every truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Sum of the only eleven truncatable primes (Project Euler problem 37)."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
| 193
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
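# Lazy import structure: submodule contents are registered here by name only and are
# imported on first attribute access via the _LazyModule at the bottom of the file,
# which keeps `import transformers` cheap when optional backends are missing.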
_import_structure = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 193
| 1
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create the universe of discourse using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 158
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Sum of all multiples of 3 or 5 below `n` (Project Euler problem 1)."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
| 158
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
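    # RoCBert looks every token up three ways: by vocabulary id, by glyph-shape id and
    # by pronunciation id, which is why setUp above writes three separate files.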
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 359
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 233
| 0
|
'''simple docstring'''
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return the primes up to `num` using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p starting from p*p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 28
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 370
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 15
| 0
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
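# MAPPING translates fairseq parameter paths to their HuggingFace WavLM counterparts;
# "*" is a placeholder for the encoder layer index. TOP_LEVEL_KEYS marks parameters
# that live on the top-level model rather than inside the encoder.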
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk the attribute path (e.g. "encoder.layers.3.attention.k_proj") to the target module
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint and rebuild the original WavLM model
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 81
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 193
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 193
| 1
|
'''simple docstring'''
import random
def _partition(data: list, pivot):
    """Three-way partition of `data` around `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the `index`-th smallest element of `items`, or None if out of range."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
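# Example: for an odd-length list, quick_select(items, len(items) // 2) returns the
# median in expected O(n) time, versus O(n log n) for fully sorting the list first.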
| 93
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 269
| 0
|
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
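# The attribute paths used below (model.encoder.sentence_encoder, cfg.model.*) follow
# the fairseq >= 1.0.0a checkpoint layout, hence the hard version check above.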
logging.set_verbosity_info()
_UpperCamelCase: Optional[int] = logging.get_logger(__name__)
_UpperCamelCase: int = 'Hello world! cécé herlolip'
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = FairseqRobertaModel.from_pretrained(_UpperCAmelCase )
roberta.eval() # disable dropout
lowercase : Optional[Any] = roberta.model.encoder.sentence_encoder
lowercase : Tuple = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
lowercase : int = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , _UpperCAmelCase )
lowercase : Tuple = XLMRobertaXLForSequenceClassification(_UpperCAmelCase ) if classification_head else XLMRobertaXLForMaskedLM(_UpperCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase : Dict = roberta_sent_encoder.embed_tokens.weight
lowercase : List[str] = roberta_sent_encoder.embed_positions.weight
lowercase : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowercase : List[Any] = roberta_sent_encoder.layer_norm.weight
lowercase : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase : BertLayer = model.roberta.encoder.layer[i]
lowercase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
lowercase : RobertaAttention = layer.attention
lowercase : Any = roberta_layer.self_attn_layer_norm.weight
lowercase : Union[str, Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
lowercase : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowercase : Dict = roberta_layer.self_attn.q_proj.weight
lowercase : Tuple = roberta_layer.self_attn.q_proj.bias
lowercase : Any = roberta_layer.self_attn.k_proj.weight
lowercase : str = roberta_layer.self_attn.k_proj.bias
lowercase : Tuple = roberta_layer.self_attn.v_proj.weight
lowercase : int = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowercase : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowercase : Tuple = roberta_layer.self_attn.out_proj.weight
lowercase : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowercase : Optional[Any] = roberta_layer.final_layer_norm.weight
lowercase : Optional[Any] = roberta_layer.final_layer_norm.bias
# intermediate
lowercase : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
lowercase : Tuple = roberta_layer.fca.weight
lowercase : Dict = roberta_layer.fca.bias
# output
lowercase : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
lowercase : Any = roberta_layer.fca.weight
lowercase : str = roberta_layer.fca.bias
# end of layer
if classification_head:
lowercase : int = roberta.model.classification_heads['mnli'].dense.weight
lowercase : Optional[int] = roberta.model.classification_heads['mnli'].dense.bias
lowercase : Union[str, Any] = roberta.model.classification_heads['mnli'].out_proj.weight
lowercase : List[Any] = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowercase : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
lowercase : Optional[int] = roberta.model.encoder.lm_head.dense.bias
lowercase : List[str] = roberta.model.encoder.lm_head.layer_norm.weight
lowercase : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.bias
lowercase : Optional[int] = roberta.model.encoder.lm_head.weight
lowercase : str = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids : torch.Tensor = roberta.encode(_UpperCAmelCase ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
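# Hedged usage sketch for the conversion entry point above; the paths are
# hypothetical placeholders, not values taken from this repository:
#
#   python <this_conversion_script>.py \
#       --roberta_checkpoint_path /path/to/fairseq/checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/hf/output_dir \
#       --classification_head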
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCamelCase: Tuple = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline( ChunkPipeline ):
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        requires_backends(self , 'vision' )
        requires_backends(self , 'torch' )
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['points_per_batch'] = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['points_per_crop'] = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['crops_n_layers'] = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['crop_overlap_ratio'] = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['crop_n_points_downscale_factor'] = kwargs['crop_n_points_downscale_factor']
        # forward args (consumed in _forward)
        if "pred_iou_thresh" in kwargs:
            forward_params['pred_iou_thresh'] = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            forward_params['stability_score_offset'] = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            forward_params['mask_threshold'] = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            forward_params['stability_score_thresh'] = kwargs['stability_score_thresh']
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['crops_nms_thresh'] = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['output_rle_mask'] = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['output_bboxes_mask'] = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess( self , image , points_per_batch=64 , crops_n_layers : int = 0 , crop_overlap_ratio : float = 512 / 1500 , points_per_crop : Optional[int] = 32 , crop_n_points_downscale_factor : Optional[int] = 1 , ):
        image = load_image(image )
        target_size = self.image_processor.size['longest_edge']
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors='pt' )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
                    model_inputs['image_embeddings'] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
                'To return all points at once, set points_per_batch to None' )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
        input_boxes = model_inputs.pop('input_boxes' )
        is_last = model_inputs.pop('is_last' )
        original_sizes = model_inputs.pop('original_sizes' ).tolist()
        reshaped_input_sizes = model_inputs.pop('reshaped_input_sizes' ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['pred_masks']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs['iou_scores']
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('iou_scores' ) )
            all_masks.extend(model_output.pop('masks' ) )
            all_boxes.append(model_output.pop('boxes' ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional['rle_mask'] = rle_mask
        if output_bboxes_mask:
            optional['bounding_boxes'] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_upernet'] = [
        'UperNetForSemanticSegmentation',
        'UperNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer ( PreTrainedTokenizer ):
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        self.jieba = jieba
        self.translator = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text : str ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ) -> str:
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
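# Hedged usage sketch for the tokenizer above. "TsinghuaAI/CPM-Generate" is the
# checkpoint listed in PRETRAINED_VOCAB_FILES_MAP; the sample text is illustrative:
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer.encode("清华大学 自然语言处理")
#   text = tokenizer.decode(ids)  # " "/"\n" round-trip through \u2582/\u2583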
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_poolformer''': [
        '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''PoolFormerConfig''',
        '''PoolFormerOnnxConfig''',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_poolformer'''] = ['''PoolFormerFeatureExtractor''']
    _import_structure['''image_processing_poolformer'''] = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_poolformer'''] = [
        '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PoolFormerForImageClassification''',
        '''PoolFormerModel''',
        '''PoolFormerPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i :int) -> int: # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_split() -> None:
    with parallel_backend('''spark'''):
        assert ParallelBackendConfig.backend_name == "spark"
    lista = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend('''unsupported backend'''):
            map_nested(add_one , lista , num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend('''unsupported backend'''):
            map_nested(add_one , lista , num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1])
def test_parallel_backend_map_nested(num_proc :int) -> None:
    s1 = [1, 2]
    s2 = {'''a''': 1, '''b''': 2}
    s3 = {'''a''': [1, 2], '''b''': [3, 4]}
    s4 = {'''a''': {'''1''': 1}, '''b''': 2}
    s5 = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'''a''': 2, '''b''': 3}
    expected_map_nested_s3 = {'''a''': [2, 3], '''b''': [4, 5]}
    expected_map_nested_s4 = {'''a''': {'''1''': 2}, '''b''': 3}
    expected_map_nested_s5 = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
    with parallel_backend('''spark'''):
        assert map_nested(add_one , s1 , num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc) == expected_map_nested_s5
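# Hedged sketch of the API exercised above (backend name as in the tests; the
# nested input is illustrative):
#
#   from datasets.parallel import parallel_backend
#   from datasets.utils.py_utils import map_nested
#   with parallel_backend("spark"):
#       out = map_nested(add_one, {"a": [1, 2], "b": 3}, num_proc=2)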
from math import ceil, sqrt
def solution( limit : int = 1_000_000 ) -> int:
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"{solution() = }")
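# Hand-checkable example, assuming the Project Euler 173 statement ("square
# laminae built from up to N tiles"): enumerating outer widths 3..26 by hand
# gives 41 laminae for a budget of one hundred tiles, so
#
#   assert solution(100) == 41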
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig( BertConfig ):
    """simple docstring"""
    model_type = """new-model"""
if is_tf_available():
    class TFNewModel( TFBertModel ):
        """simple docstring"""
        config_class = NewModelConfig
@require_tf
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] ='bert-base-cased'
__UpperCamelCase : Union[str, Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] ='bert-base-cased'
__UpperCamelCase : Optional[int] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple =TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : str =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : str =TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : str =TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Optional[int] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Any =TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Union[str, Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Any =TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : Union[str, Any] =TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Union[str, Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : str =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : Union[str, Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : List[Any] =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] =TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_tensorflow_probability
def __lowercase ( self ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__UpperCamelCase : Any =AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : str =TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : List[str] =TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
def __lowercase ( self ):
"""simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] =copy.deepcopy(model.config )
__UpperCamelCase : Optional[Any] =['FunnelBaseModel']
__UpperCamelCase : Tuple =TFAutoModel.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : List[Any] =TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
try:
            AutoConfig.register('new-model' , NewModelConfig )
__UpperCamelCase : int =[
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFNewModel )
                    auto_class.register(NewModelConfig , TFNewModel )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFBertModel )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase : List[str] =BertModelTester(self ).get_config()
__UpperCamelCase : Optional[Any] =NewModelConfig(**tiny_config.to_dict() )
__UpperCamelCase : Dict =auto_class.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =auto_class.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __lowercase ( self ):
"""simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
            model = TFAutoModel.from_pretrained('bert-base' )
def __lowercase ( self ):
"""simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
def __lowercase ( self ):
"""simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def __lowercase ( self ):
"""simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , 'Use `from_pt=True` to load this model' ):
            model = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
__UpperCamelCase : Dict =TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__UpperCamelCase : Dict =TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
__UpperCamelCase : Union[str, Any] =TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests( unittest.TestCase ):
UpperCamelCase__ = StableDiffusionLDMaDPipeline
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
a = CLIPTextModel(__magic_name__ )
a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
a = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self :List[str] , __magic_name__ :int , __magic_name__ :Dict=0 ):
'''simple docstring'''
if str(__magic_name__ ).startswith("""mps""" ):
a = torch.manual_seed(__magic_name__ )
else:
a = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
a = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = """cpu""" # ensure determinism for the device-dependent torch.Generator
a = self.get_dummy_components()
a = StableDiffusionLDMaDPipeline(**__magic_name__ )
a = ldmad_pipe.to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
a = self.get_dummy_inputs(__magic_name__ )
a = ldmad_pipe(**__magic_name__ )
a , a = output.rgb, output.depth
a = rgb[0, -3:, -3:, -1]
a = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
a = np.array(
[0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
a = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
    def test_stable_diffusion_prompt_embeds( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward with plain prompt strings
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        rgb_slice_a = rgb[0, -3:, -3:, -1]
        depth_slice_a = depth[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop("""prompt""" )]
        text_inputs = ldmad_pipe.tokenizer(
            prompt , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
        text_input_ids = text_inputs["""input_ids"""].to(torch_device )
        inputs["""prompt_embeds"""] = ldmad_pipe.text_encoder(text_input_ids )[0]
        # forward with precomputed prompt embeddings; the two paths must agree
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        rgb_slice_b = rgb[0, -3:, -3:, -1]
        depth_slice_b = depth[0, -3:, -1]
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1E-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1E-4
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = """cpu""" # ensure determinism for the device-dependent torch.Generator
a = self.get_dummy_components()
a = PNDMScheduler(skip_prk_steps=__magic_name__ )
a = StableDiffusionLDMaDPipeline(**__magic_name__ )
a = ldmad_pipe.to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
a = self.get_dummy_inputs(__magic_name__ )
a = """french fries"""
a = ldmad_pipe(**__magic_name__ , negative_prompt=__magic_name__ )
a , a = output.rgb, output.depth
a = rgb[0, -3:, -3:, -1]
a = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
a = np.array(
[0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
a = np.array([107.84738, 84.62802, 89.962135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests( unittest.TestCase ):
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :List[Any]="cpu" , __magic_name__ :Optional[int]=torch.floataa , __magic_name__ :int=0 ):
'''simple docstring'''
a = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
a = np.random.RandomState(__magic_name__ ).standard_normal((1, 4, 64, 64) )
a = torch.from_numpy(__magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ )
a = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
a = ldmad_pipe.to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
a = self.get_inputs(__magic_name__ )
a = ldmad_pipe(**__magic_name__ )
a , a = output.rgb, output.depth
a = rgb[0, -3:, -3:, -1].flatten()
        a = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
a = np.array(
[0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
a = np.array(
[0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests( unittest.TestCase ):
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :str , __magic_name__ :List[str]="cpu" , __magic_name__ :Optional[int]=torch.floataa , __magic_name__ :List[str]=0 ):
'''simple docstring'''
a = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
a = np.random.RandomState(__magic_name__ ).standard_normal((1, 4, 64, 64) )
a = torch.from_numpy(__magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ )
a = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
a = self.get_inputs(__magic_name__ )
a = ldmad_pipe(**__magic_name__ )
a , a = output.rgb, output.depth
a = 0.495586
a = 0.33795515
a = 112.48518
a = 98.489746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
a = self.get_inputs(__magic_name__ )
a = ldmad_pipe(**__magic_name__ )
a , a = output.rgb, output.depth
a = 0.4194127
a = 0.35375586
a = 0.5638502
a = 0.34686103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''nat'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
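# Hedged usage sketch for the configuration above (NatModel is assumed to be
# importable from transformers):
#
#   from transformers import NatConfig, NatModel
#   config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
#   model = NatModel(config)
#   print(config.hidden_size)  # embed_dim * 2 ** (num stages - 1) = 512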
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    def __init__( self , features=None , **torch_tensor_kwargs ):
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate( self , column ):
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize( self , value ):
        import torch
        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize( self , data_struct ):
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , '__array__' ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct : dict ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table : pa.Table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table : pa.Table ) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table : pa.Table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
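# Hedged usage sketch: in `datasets` this formatter backs `with_format("torch")`
# (the toy dataset below is illustrative):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
#   batch = ds[:2]  # {"x": tensor([[1., 2.], [3., 4.]])}, stacked by _consolidate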
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D( nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxDownsample2D( nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxResnetBlock2D( nn.Module ):
    in_channels : int
    out_channels : int = None
    dropout_prob : float = 0.0
    use_nin_shortcut : bool = None
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv1 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
    def __call__( self , hidden_states , temb , deterministic=True ):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
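# Hedged shape-check sketch for the blocks above (channel sizes and shapes are
# illustrative assumptions):
#
#   import jax
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
#   x = jnp.zeros((1, 16, 16, 32))   # NHWC, as the conv padding implies
#   temb = jnp.zeros((1, 128))
#   params = block.init(jax.random.PRNGKey(0), x, temb)
#   y = block.apply(params, x, temb)  # same spatial size, 32 channels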
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens : List[str] ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code : str ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    """simple docstring"""
    def __init__( self , *,
        duplication_jaccard_threshold : float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add( self , code_key : Tuple , min_hash : MinHash):
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}')
            return
        self._index.insert(code_key , min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters( self ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save( self , filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , 'w') as f:
            json.dump(duplicate_clusters , f)
def _compute_min_hash( element ):
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator : Type[Dataset] ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash ,ThreadedIterator(dataset_iterator ,max_queue_size=10000 ) ,chunksize=100 ,):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator : Type[Dataset] ,jaccard_threshold : float ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) ,max_queue_size=100 ) ):
        di.add(filename ,min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code_1 : str ,code_2 : str ) -> float:
    tokens_1 = get_tokens(code_1 )
    tokens_2 = get_tokens(code_2 )
    return len(tokens_1 & tokens_2 ) / len(tokens_1 | tokens_2 )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster ,jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1 ,code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list ,dataset ,jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared ,jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f ,cluster_list ,) ,total=len(cluster_list ) ,):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset : Type[Dataset] ,jaccard_threshold : float = 0.85 ):
    duplicate_clusters = make_duplicate_clusters(dataset ,jaccard_threshold )
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters ,dataset ,jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x ,idx : idx not in remove_indices ,with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['base_index']]['copies']
    print(F'Original dataset size: {len(dataset )}' )
    print(F'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(F'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(F'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(F'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
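# Hedged usage sketch for the near-deduplication pipeline above (the dataset
# name is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)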
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A : Tuple = logging.get_logger(__name__)
A : Tuple = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "longformer"
def __init__( self : Any , SCREAMING_SNAKE_CASE : Union[List[int], int] = 512 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 30522 , SCREAMING_SNAKE_CASE : int = 768 , SCREAMING_SNAKE_CASE : int = 12 , SCREAMING_SNAKE_CASE : int = 12 , SCREAMING_SNAKE_CASE : int = 3072 , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : float = 1e-12 , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : List[Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : List[Any] = attention_window
_A : int = sep_token_id
_A : Tuple = bos_token_id
_A : Any = eos_token_id
_A : List[str] = vocab_size
_A : Any = hidden_size
_A : Optional[int] = num_hidden_layers
_A : int = num_attention_heads
_A : Dict = hidden_act
_A : List[Any] = intermediate_size
_A : int = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : int = max_position_embeddings
_A : Any = type_vocab_size
_A : Dict = initializer_range
_A : Any = layer_norm_eps
_A : List[Any] = onnx_export
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : "PretrainedConfig" , SCREAMING_SNAKE_CASE : str = "default" , SCREAMING_SNAKE_CASE : "List[PatchingSpec]" = None):
super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : Optional[Any] = True
@property
def A ( self : List[str]):
if self.task == "multiple-choice":
_A : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_A : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
])
@property
def A ( self : str):
_A : int = super().outputs
if self.task == "default":
_A : str = {0: 'batch'}
return outputs
@property
def A ( self : List[Any]):
return 1e-4
@property
def A ( self : Dict):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def A ( self : str , SCREAMING_SNAKE_CASE : "PreTrainedTokenizerBase" , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ):
_A : Union[str, Any] = super().generate_dummy_inputs(
preprocessor=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_A : Tuple = torch.zeros_like(inputs['input_ids'])
# make every second token global
_A : Dict = 1
return inputs
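
# Hedged illustration of the dummy global-attention mask built above: zeros
# everywhere, then every second position marked global.
# import torch
# input_ids = torch.ones(2, 8, dtype=torch.long)  # stand-in (batch, seq) tensor
# global_attention_mask = torch.zeros_like(input_ids)
# global_attention_mask[:, ::2] = 1
# print(global_attention_mask[0])  # tensor([1, 0, 1, 0, 1, 0, 1, 0])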
| 227
| 1
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_snake_case = f.readlines()
_snake_case = 0
while line_index < len(_SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
_snake_case = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
_snake_case = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ):
_snake_case = _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ).groups()[0]
_snake_case = re.findall(R"""\[([^\]]+)\]""" , _SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
_snake_case = _re_import_struct_key_value.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
_snake_case = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
_snake_case = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_snake_case = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
_snake_case = lines[line_index]
if _re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ) is not None:
_snake_case = _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ )
_snake_case = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(_SCREAMING_SNAKE_CASE ) is not None:
_snake_case = _re_between_brackets.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ )
_snake_case = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
_snake_case = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_snake_case = []
while (
line_index < len(_SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
_snake_case = lines[line_index]
_snake_case = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
_snake_case = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(_SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
_snake_case = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
_snake_case = lines[line_index]
_snake_case = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
_snake_case = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
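
# Hedged illustration of the parsing helper above: only "if not
# is_xxx_available()" guard lines yield a backend string; everything else
# maps to None.
# print(find_backend("    if not is_torch_available():"))  # -> "torch"
# print(find_backend("    import os"))                     # -> None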
| 341
|
'''simple docstring'''
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
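
# Hedged example of the line format main() consumes from --correct_filename:
# four semicolon-separated fields, one expected-value fix per line.
# sample = ("tests/models/bert/test_modeling_bert.py;BertModelTest;"
#           "test_inference;expected_slice = torch.tensor([0.1, 0.2])\n")
# file, class_name, test_name, correct_line = sample.split(";")
# print(file, class_name, test_name, correct_line.strip(), sep=" | ")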
| 341
| 1
|
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier trained by solving Wolfe's dual problem."""

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
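
# A hedged sanity check on linearly separable toy data (hard margin via the
# default regularization=np.inf, linear kernel).
# xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#       np.asarray([0.0, -1.0]), np.asarray([0.0, -2.0])]
# ys = np.asarray([1, 1, -1, -1])
# svc = SVC(kernel="linear")
# svc.fit(xs, ys)
# print(svc.predict(np.asarray([0.0, 3.0])))   # expected: 1
# print(svc.predict(np.asarray([0.0, -3.0])))  # expected: -1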
| 363
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCAmelCase = get_tests_dir("fixtures")
_lowerCAmelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_lowerCAmelCase = get_tests_dir("fixtures/dummy-config.json")
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = 0
def snake_case__ ( self : Optional[int] ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ ).to_dict()
config_dict.pop('''feature_extractor_type''' )
__magic_name__ = WavaVecaFeatureExtractor(**a__ )
# save in new folder
model_config.save_pretrained(a__ )
config.save_pretrained(a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
# make sure private variable is not incorrectly saved
__magic_name__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : str ):
with self.assertRaisesRegex(
a__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def snake_case__ ( self : str ):
with self.assertRaisesRegex(
a__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ , revision='''aaaaaa''' )
def snake_case__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
a__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def snake_case__ ( self : Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a__ ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a__ ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ , trust_remote_code=a__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def snake_case__ ( self : int ):
try:
AutoConfig.register('''custom''' , a__ )
AutoFeatureExtractor.register(a__ , a__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a__ ):
AutoFeatureExtractor.register(a__ , a__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__magic_name__ = CustomFeatureExtractor.from_pretrained(a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self : int ):
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Optional[int] = True
try:
AutoConfig.register('''custom''' , a__ )
AutoFeatureExtractor.register(a__ , a__ )
# If remote code is not set, the default is to use local
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(a__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
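
# Hedged sketch of the registration pattern exercised above: pair a custom
# config class with a custom feature extractor in the auto-API, then load it
# back through the auto class ("path/to/fixtures" is a placeholder).
# AutoConfig.register("custom", CustomConfig)
# AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# feature_extractor = CustomFeatureExtractor.from_pretrained("path/to/fixtures")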
| 98
| 0
|
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (shortest remaining time first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turn around times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many processes you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )
    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
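
# A hedged non-interactive run of the same SRTF computation, bypassing the
# input() prompts above (expected waiting times: [4, 1, 0]).
# arrival = [0, 1, 2]
# burst = [5, 3, 1]
# wt = calculate_waitingtime(arrival, burst, 3)
# tat = calculate_turnaroundtime(burst, 3, wt)
# print(wt, tat)                 # [4, 1, 0] [9, 4, 1]
# calculate_average_times(wt, tat, 3)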
| 82
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    """Configuration class for XLM-RoBERTa-XL models."""

    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
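
# Hedged usage sketch of the config class above; the kwargs shown are
# illustrative overrides of the XXL-scale defaults.
# config = XLMRobertaXLConfig(num_hidden_layers=4, hidden_size=256, num_attention_heads=4)
# print(config.model_type)  # "xlm-roberta-xl"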
| 129
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether tp freeze the encoder."} )
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCAmelCase__ : Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
UpperCAmelCase__ : Optional[int] = field(
default=10_24 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=1_28 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase__ : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
UpperCAmelCase__ : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
UpperCAmelCase__ : Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "Source language id for translation."} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "Target language id for translation."} )
UpperCAmelCase__ : Optional[int] = field(default=A_ , metadata={"help": "# num_beams to use for evaluation."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def lowercase_ ( A__ , A__ , A__ ) -> Dict:
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(A__ , os.path.join(A__ , F'{split}_results.json' ) )
def lowercase_ ( ) -> Tuple:
"""simple docstring"""
snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case = parser.parse_args_into_dataclasses()
check_output_dir(A__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , A__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(A__ , A__ , A__ ):
assert hasattr(A__ , A__ ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(A__ , A__ , getattr(A__ , A__ ) )
snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=A__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(A__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(A__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(A__ , A__ ):
snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(A__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
snake_case = SeqaSeqDataset
# Get datasets
snake_case = (
dataset_class(
A__ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
snake_case = (
dataset_class(
A__ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
snake_case = (
dataset_class(
A__ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
snake_case = (
build_compute_metrics_fn(data_args.task , A__ ) if training_args.predict_with_generate else None
)
snake_case = SeqaSeqTrainer(
model=A__ , args=A__ , data_args=A__ , train_dataset=A__ , eval_dataset=A__ , data_collator=SeqaSeqDataCollator(
A__ , A__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=A__ , tokenizer=A__ , )
snake_case = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
snake_case = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
snake_case = train_result.metrics
snake_case = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , A__ , training_args.output_dir )
all_metrics.update(A__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case = trainer.evaluate(metric_key_prefix="val" )
snake_case = data_args.n_val
snake_case = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , A__ , training_args.output_dir )
all_metrics.update(A__ )
if training_args.do_predict:
logger.info("*** Predict ***" )
snake_case = trainer.predict(test_dataset=A__ , metric_key_prefix="test" )
snake_case = test_output.metrics
snake_case = data_args.n_test
if trainer.is_world_process_zero():
snake_case = round(metrics["test_loss"] , 4 )
handle_metrics("test" , A__ , training_args.output_dir )
all_metrics.update(A__ )
if training_args.predict_with_generate:
snake_case = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
snake_case = lmap(str.strip , A__ )
write_txt_file(A__ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(A__ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def lowercase_ ( A__ ) -> Optional[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
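
# Hedged sketch of the JSON entry point handled above: passing a single .json
# path makes HfArgumentParser read all three argument groups from that file.
# All values below are illustrative placeholders.
# import json
# example_args = {
#     "model_name_or_path": "sshleifer/student_marian_en_ro_6_1",  # hypothetical model id
#     "data_dir": "wmt_en_ro",
#     "output_dir": "output",
#     "do_train": True,
#     "task": "translation",
# }
# with open("train_args.json", "w") as f:
#     json.dump(example_args, f, indent=2)
# then: python finetune_trainer.py train_args.json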
| 137
|
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(task_performed))
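
# A hedged walk-through of the bitmask bookkeeping above for the 3-person
# example (expected answer: 10 assignments). final_mask = (1 << 3) - 1 = 0b111
# means "every person has a task"; bit p is set once person p is assigned.
# mask = 0
# for person in (0, 2):            # assign persons 0 and 2
#     mask |= 1 << person
# print(bin(mask))                 # 0b101 -> person 1 still free
# print(mask == (1 << 3) - 1)      # False: not yet a complete assignment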
| 137
| 1
|
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase , lowercase , lowercase , lowercase=False , lowercase="[CLS]" , lowercase=1 , lowercase="[SEP]" , lowercase=False , lowercase=False , lowercase=0 , lowercase=0 , lowercase=-1_0_0 , lowercase=0 , lowercase=True , ):
"""simple docstring"""
A_ : Any = {label: i for i, label in enumerate(lowercase )}
A_ : str = []
for ex_index, example in enumerate(lowercase ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('Writing example %d of %d' , lowercase , len(lowercase ) )
A_ : Dict = []
A_ : List[Any] = []
for word, label in zip(example.words , example.labels ):
A_ : Any = tokenizer.tokenize(lowercase )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(lowercase ) > 0:
tokens.extend(lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
A_ : str = tokenizer.num_special_tokens_to_add()
if len(lowercase ) > max_seq_length - special_tokens_count:
A_ : Any = tokens[: (max_seq_length - special_tokens_count)]
A_ : List[Any] = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
A_ : Union[str, Any] = [sequence_a_segment_id] * len(lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
A_ : Dict = [cls_token] + tokens
A_ : Union[str, Any] = [pad_token_label_id] + label_ids
A_ : Union[str, Any] = [cls_token_segment_id] + segment_ids
A_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
A_ : Tuple = [1 if mask_padding_with_zero else 0] * len(lowercase )
# Zero-pad up to the sequence length.
A_ : int = max_seq_length - len(lowercase )
if pad_on_left:
A_ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
A_ : Optional[Any] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
A_ : int = ([pad_token_segment_id] * padding_length) + segment_ids
A_ : Tuple = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(lowercase ) == max_seq_length
assert len(lowercase ) == max_seq_length
assert len(lowercase ) == max_seq_length
assert len(lowercase ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
logger.info('tokens: %s' , ' '.join([str(lowercase ) for x in tokens] ) )
logger.info('input_ids: %s' , ' '.join([str(lowercase ) for x in input_ids] ) )
logger.info('input_mask: %s' , ' '.join([str(lowercase ) for x in input_mask] ) )
logger.info('segment_ids: %s' , ' '.join([str(lowercase ) for x in segment_ids] ) )
logger.info('label_ids: %s' , ' '.join([str(lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
A_ : str = None
features.append(
InputFeatures(
input_ids=lowercase , attention_mask=lowercase , token_type_ids=lowercase , label_ids=lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase=False , lowercase = Split.train , ):
"""simple docstring"""
A_ : Tuple = os.path.join(
lowercase , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : Tuple = cached_features_file + '.lock'
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
A_ : Dict = torch.load(lowercase )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
A_ : str = token_classification_task.read_examples_from_file(lowercase , lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
A_ : Optional[Any] = token_classification_task.convert_examples_to_features(
lowercase , lowercase , lowercase , lowercase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , lowercase )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , lowercase ):
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase :
'''simple docstring'''
        features: List[InputFeatures]
        pad_token_label_id: int = -100
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase=False , lowercase = Split.train , ):
"""simple docstring"""
A_ : Dict = token_classification_task.read_examples_from_file(lowercase , lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
A_ : Union[str, Any] = token_classification_task.convert_examples_to_features(
lowercase , lowercase , lowercase , lowercase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64) , (
                        {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
                        tf.TensorShape([None] ),
                    ) , )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64) , (
                        {
                            'input_ids': tf.TensorShape([None] ),
                            'attention_mask': tf.TensorShape([None] ),
                            'token_type_ids': tf.TensorShape([None] ),
                        },
                        tf.TensorShape([None] ),
                    ) , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , lowercase ):
"""simple docstring"""
return self.features[i]
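
# Hedged sketch of the label-alignment rule applied above: the first subword
# of each word keeps the real label id, continuation subwords get
# pad_token_label_id so the loss ignores them.
# pad_token_label_id = -100            # matches nn.CrossEntropyLoss().ignore_index
# word_tokens = ["huggin", "##gface"]  # hypothetical subword split of one word
# label_id = 3                         # hypothetical label_map["B-ORG"]
# label_ids = [label_id] + [pad_token_label_id] * (len(word_tokens) - 1)
# print(label_ids)                     # [3, -100]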
| 140
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = []
def __init__( self : Any ,A : List[str] ,A : str="<unk>" ,A : int="<s>" ,A : Union[str, Any]="</s>" ,A : List[str]="<pad>" ,A : int="[SEP]" ,A : Optional[Any]="[MASK]" ,A : Tuple="[CLS]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,):
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sep_token=A ,mask_token=A ,cls_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
__A = vocab_file
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCamelCase_ ( self : List[str] ):
return self.sp_model.get_piece_size()
def UpperCamelCase_ ( self : Optional[Any] ):
__A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : str ,A : Optional[Any] ):
__A = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self : Any ,A : str ):
return self.sp_model.encode(A ,out_type=A )
def UpperCamelCase_ ( self : List[str] ,A : Tuple ):
return self.sp_model.piece_to_id(A )
def UpperCamelCase_ ( self : List[Any] ,A : Tuple ):
__A = self.sp_model.IdToPiece(A )
return token
def UpperCamelCase_ ( self : List[Any] ,A : int ):
__A = []
__A = ""
__A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
__A = True
__A = []
else:
current_sub_tokens.append(A )
__A = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCamelCase_ ( self : Tuple ,A : List[int] ,A : bool = False ,A : bool = None ,A : bool = True ,**A : Union[str, Any] ,):
__A = kwargs.pop("use_source_tokenizer" ,A )
__A = self.convert_ids_to_tokens(A ,skip_special_tokens=A )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__A = []
__A = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
__A = []
sub_texts.append(A )
else:
current_sub_text.append(A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
__A = re.sub(R" (\[(MASK|SEP)\])" ,R"\1" ," ".join(A ) )
else:
__A = "".join(A )
__A = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__A = self.clean_up_tokenization(A )
return clean_text
else:
return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
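
# Illustrative check (not from the original module): the sequence-pair layout
# produced by the three methods above, with toy ids standing in for the real
# [CLS] (101) and [SEP] (102) ids.
def _demo_pair_layout():
    cls, sep = [101], [102]
    token_ids_0, token_ids_1 = [7, 8, 9], [4, 5]
    input_ids = cls + token_ids_0 + sep + token_ids_1 + sep
    token_type_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    assert input_ids == [101, 7, 8, 9, 102, 4, 5, 102]
    assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]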
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
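
# Illustrative example (not from the original script): the buffering logic in
# `parse_line` above, applied to a fabricated pytest-style warnings snippet.
def _demo_warning_filter(lines, targets=("DeprecationWarning",)):
    selected, buffer = set(), []
    for line in lines + ["end of warnings"]:  # non-indented sentinel flushes the last buffer
        if not line.startswith(" "):
            if buffer:
                warning = "\n".join(buffer)
                if any(f": {x}: " in warning for x in targets):
                    selected.add(warning)
                buffer.clear()
        else:
            buffer.append(line.strip())
    return selected


# _demo_warning_filter(
#     [
#         "a.py::test_x",
#         "  a.py:1: DeprecationWarning: old API",
#         "  use new_api() instead",
#         "b.py::test_y",
#         "  b.py:2: UserWarning: minor issue",
#     ]
# ) keeps only the DeprecationWarning block.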
import datasets
from .evaluate import evaluate
lowercase = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
lowercase = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
lowercase = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
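
# Illustrative note (not from the original module): `_compute` reshapes the
# flat `references` list into the nested SQuAD-style structure that the
# official CUAD `evaluate` script expects. A toy reference
#     {"id": "q1", "answers": {"text": ["The seller:"], "answer_start": [143]}}
# becomes
#     [{"paragraphs": [{"qas": [{"answers": [{"text": "The seller:"}], "id": "q1"}]}]}]
# and the `answer_start` offsets are dropped, since the metric ignores them.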
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    the whitespace/control characters that BPE code would choke on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
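
# Illustrative check (not from the original module): every byte gets a
# printable stand-in; printable bytes map to themselves, control bytes are
# shifted past the 8-bit range.
def _demo_byte_table():
    table = bytes_to_unicode()
    assert len(table) == 256
    assert table[ord("A")] == "A"  # printable bytes are unchanged
    assert table[0] == chr(256)  # byte 0 is the first remapped control byte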
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols (variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
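
# Illustrative check (not from the original module): adjacent symbol pairs of
# the word ("h", "e", "l", "l", "o").
def _demo_get_pairs():
    assert get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}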
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
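
# Illustrative sketch (not from the original module): one BPE merge step in
# isolation. With ranks {("l", "o"): 0}, ("h", "e", "l", "l", "o") becomes
# ("h", "e", "l", "lo"); the `bpe` method above simply repeats this until no
# ranked pair remains.
def _demo_bpe_merge(word=("h", "e", "l", "l", "o"), ranks={("l", "o"): 0}):
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
    if bigram not in ranks:
        return word  # nothing mergeable
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)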
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
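
# Illustrative sketch (not from the original script): `get_duration` is
# imported from a local `utils` module that is not shown here. A plausible
# minimal implementation would time the wrapped call and return the elapsed
# seconds, which is what the `times` dict above appears to collect:
#
#     import functools
#     import timeit
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             starttime = timeit.default_timer()
#             func(*args, **kwargs)
#             return timeit.default_timer() - starttime
#         return wrapper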
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ : Dict =get_tests_dir('fixtures')
lowerCAmelCase__ : int =get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase__ : int =get_tests_dir('fixtures/dummy-config.json')
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ ).to_dict()
config_dict.pop('feature_extractor_type' )
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaFeatureExtractor(**lowerCAmelCase__ )
# save in new folder
model_config.save_pretrained(lowerCAmelCase__ )
config.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase__ , 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE_ : Dict = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase__ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ , revision='aaaaaa' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase__ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
SCREAMING_SNAKE_CASE_ : int = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , lowerCAmelCase__ )
AutoFeatureExtractor.register(lowerCAmelCase__ , lowerCAmelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase__ ):
AutoFeatureExtractor.register(lowerCAmelCase__ , lowerCAmelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ : List[Any] = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self ):
"""simple docstring"""
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = True
try:
AutoConfig.register('custom' , lowerCAmelCase__ )
AutoFeatureExtractor.register(lowerCAmelCase__ , lowerCAmelCase__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE_ : List[str] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE_ : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowerCAmelCase__ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
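
# Illustrative checks (not from the original module), derived directly from
# the regexes above:
def _demo_naming():
    assert camelcase_to_snakecase("SomeDatasetV1") == "some_dataset_v1"
    assert snakecase_to_camelcase("some_dataset_v1") == "SomeDatasetV1"
    assert filename_prefix_for_split("squad", "train") == "squad-train"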
def nand_gate(input_1: int, input_2: int) -> int:
    """
    Calculate NAND of the input values. The output is 0 only when both inputs are 1.
    """
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """
    Tests the nand_gate function.
    """
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
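
# Illustrative extension (not from the original file): NAND is functionally
# complete, so the other basic gates can be built from it.
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return nand_gate(nand_gate(input_1, input_2), nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(nand_gate(input_1, input_1), nand_gate(input_2, input_2))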
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (3, 32, 128)
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
lowerCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1))
return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ) -> int:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
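
# Illustrative usage (not from the original module): the defaults above match
# the ALBERT xxlarge architecture; a base-sized variant can be obtained by
# overriding a few keyword arguments.
# config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)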
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ) -> List[Any]:
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
lowercase_ = """scheduler_config.json"""
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """
    Base class for the output of a scheduler's step function.
    """

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """
    Mixin containing common functions for the schedulers.
    """

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
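
# Illustrative check (not from the original module): the cosine schedule above
# in plain Python, so individual betas can be inspected. Each beta_i is
# 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta.
def _demo_cosine_betas(num_diffusion_timesteps=4, max_beta=0.999):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    return [
        min(1 - alpha_bar((i + 1) / num_diffusion_timesteps) / alpha_bar(i / num_diffusion_timesteps), max_beta)
        for i in range(num_diffusion_timesteps)
    ]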
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas

        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
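
# Note (not from the original module): `get_velocity_common` implements the
# v-prediction target of Salimans & Ho (2022),
#     v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0,
# the counterpart of `add_noise_common`,
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.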
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """
    Returns the next number of the chain of squared digit sums.
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list = [None] * 10000000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    """
    Follows the chain of squared digit sums for `number` and caches, for every
    member visited, whether the chain terminates at 1 (True) or at 89 (False).
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """
    Counts how many starting numbers below `number` produce a chain that
    arrives at 89.
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """
    A single training/test example for simple sequence classification.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
lowercase : List[InputFeatures]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_=False , UpperCamelCase_ = False , ):
lowercase_ :Dict = hans_processors[task]()
lowercase_ :Tuple = os.path.join(
UpperCamelCase_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(UpperCamelCase_ ) , UpperCamelCase_ , ) , )
lowercase_ :Any = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
lowercase_ :List[str] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase_ :Any = cached_features_file + '''.lock'''
with FileLock(UpperCamelCase_ ):
if os.path.exists(UpperCamelCase_ ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
lowercase_ :Tuple = torch.load(UpperCamelCase_ )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
lowercase_ :Dict = (
processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
)
logger.info('''Training examples: %s''' , len(UpperCamelCase_ ) )
lowercase_ :Any = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
logger.info('''Saving features into cached file %s''' , UpperCamelCase_ )
torch.save(self.features , UpperCamelCase_ )
def __len__( self ):
return len(self.features )
def __getitem__( self , UpperCamelCase_ ):
return self.features[i]
def UpperCamelCase ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase :
'''simple docstring'''
lowercase : List[InputFeatures]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 128 , UpperCamelCase_=False , UpperCamelCase_ = False , ):
lowercase_ :List[str] = hans_processors[task]()
lowercase_ :str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
lowercase_ :Union[str, Any] = label_list
lowercase_ :Optional[int] = processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
lowercase_ :Optional[int] = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(UpperCamelCase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowercase_ :Optional[int] = tf.data.Dataset.from_generator(
UpperCamelCase_ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCamelCase ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self , UpperCamelCase_ ):
return self.features[i]
def UpperCamelCase ( self ):
return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """
    Loads a data file into a list of ``InputFeatures``.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
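
# Illustrative note (not from the original module): the "HACK" above reorders
# the MNLI labels for RoBERTa/BART-style checkpoints whose classification
# heads were trained with label indices 1 and 2 swapped:
#     label_list = ["contradiction", "entailment", "neutral"]
#     label_list[1], label_list[2] = label_list[2], label_list[1]
#     # -> ["contradiction", "neutral", "entailment"]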
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self ):
lowercase_ :List[Any] = 1
lowercase_ :List[Any] = 3
lowercase_ :str = (32, 32)
lowercase_ :Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCamelCase_ )
@property
def UpperCamelCase ( self ):
def extract(*UpperCamelCase_ , **UpperCamelCase_ ):
class UpperCamelCase :
'''simple docstring'''
def __init__( self ):
lowercase_ :Dict = torch.ones([0] )
def UpperCamelCase ( self , UpperCamelCase_ ):
self.pixel_values.to(UpperCamelCase_ )
return self
return Out()
return extract
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ :List[Any] = self.dummy_cond_unet
lowercase_ :int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
lowercase_ :Any = self.dummy_vae
lowercase_ :Dict = self.dummy_text_encoder
lowercase_ :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# assemble the pipeline around the DDIM scheduler configured above
lowercase_ :Optional[Any] = StableDiffusionPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :Union[str, Any] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :str = '''A painting of a squirrel eating a burger'''
lowercase_ :int = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[Any] = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
lowercase_ :Any = output.images
lowercase_ :List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :List[Any] = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCamelCase_ , )[0]
lowercase_ :Dict = image[0, -3:, -3:, -1]
lowercase_ :List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ :List[Any] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ :List[str] = self.dummy_cond_unet
lowercase_ :Optional[Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :Optional[Any] = self.dummy_vae
lowercase_ :List[Any] = self.dummy_text_encoder
lowercase_ :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
lowercase_ :Tuple = StableDiffusionPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :Optional[int] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :str = '''A painting of a squirrel eating a burger'''
lowercase_ :Any = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
lowercase_ :Optional[Any] = output.images
lowercase_ :List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :List[str] = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCamelCase_ , )[0]
lowercase_ :Dict = image[0, -3:, -3:, -1]
lowercase_ :str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ :Dict = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :List[str] = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(pipe.scheduler , UpperCamelCase_ )
assert pipe.safety_checker is None
lowercase_ :Optional[int] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase_ )
lowercase_ :Union[str, Any] = StableDiffusionPipeline.from_pretrained(UpperCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase_ :List[Any] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.dummy_cond_unet
lowercase_ :Any = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :int = self.dummy_vae
lowercase_ :Tuple = self.dummy_text_encoder
lowercase_ :Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
lowercase_ :Optional[int] = unet.half()
lowercase_ :Union[str, Any] = vae.half()
lowercase_ :Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ :Any = StableDiffusionPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :Dict = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :List[str] = '''A painting of a squirrel eating a burger'''
lowercase_ :List[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
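# Illustrative sketch (not part of the original tests): the fp16 casting pattern used
# in the test above, reduced to a single layer. Names are hypothetical.
def _demo_half_precision():
    import torch
    layer = torch.nn.Linear(4, 4).half()  # weights cast to float16
    x = torch.randn(1, 4).half()          # inputs must match the weight dtype
    return layer.weight.dtype, x.dtype    # (torch.float16, torch.float16)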
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=UpperCamelCase_ )
lowercase_ :Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase_ :List[Any] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :List[Any] = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
lowercase_ :str = 40_0366_0346
lowercase_ :Optional[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase_ :Tuple = torch.manual_seed(UpperCamelCase_ )
lowercase_ :int = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase_ :List[str] = output.images
lowercase_ :int = image[0, -3:, -3:, -1]
lowercase_ :str = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
lowercase_ :Dict = torch.manual_seed(UpperCamelCase_ )
lowercase_ :Any = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase_ :int = output.images
lowercase_ :Union[str, Any] = image[0, -3:, -3:, -1]
lowercase_ :Optional[int] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=UpperCamelCase_ )
lowercase_ :List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase_ :Dict = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :Optional[int] = '''padme amidala taking a bath artwork, safe for work, no nudity'''
lowercase_ :Any = 27_3497_1755
lowercase_ :str = 7
lowercase_ :Optional[Any] = torch.manual_seed(UpperCamelCase_ )
lowercase_ :Tuple = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase_ :Optional[Any] = output.images
lowercase_ :str = image[0, -3:, -3:, -1]
lowercase_ :int = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase_ :Any = torch.manual_seed(UpperCamelCase_ )
lowercase_ :List[Any] = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase_ :List[str] = output.images
lowercase_ :Optional[Any] = image[0, -3:, -3:, -1]
lowercase_ :Optional[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
lowercase_ :Tuple = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :List[str] = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
lowercase_ :Any = 10_4435_5234
lowercase_ :Union[str, Any] = 12
lowercase_ :str = torch.manual_seed(UpperCamelCase_ )
lowercase_ :str = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase_ :Optional[int] = output.images
lowercase_ :str = image[0, -3:, -3:, -1]
lowercase_ :Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase_ :Dict = torch.manual_seed(UpperCamelCase_ )
lowercase_ :Optional[Any] = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase_ :Optional[Any] = output.images
lowercase_ :List[Any] = image[0, -3:, -3:, -1]
lowercase_ :Any = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
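# Illustrative sketch (not part of the original tests): the slice-comparison pattern
# used throughout the assertions above, shown on synthetic data.
def _demo_slice_check():
    import numpy as np
    image = np.zeros((1, 512, 512, 3), dtype=np.float32)
    image_slice = image[0, -3:, -3:, -1]  # 3x3 corner of the last channel
    expected_slice = np.zeros(9, dtype=np.float32)
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2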
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
A = logging.get_logger(__name__)
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase=None ):
if not conversation_id:
__a : List[Any] = uuid.uuida()
if past_user_inputs is None:
__a : Tuple = []
if generated_responses is None:
__a : Dict = []
__a : uuid.UUID = conversation_id
__a : List[str] = past_user_inputs
__a : List[str] = generated_responses
__a : Optional[str] = text
def __eq__( self , _UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = False ):
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
__a : Any = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
__a : List[str] = text
def _lowerCamelCase ( self ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__a : Any = None
def _lowerCamelCase ( self , _UpperCAmelCase ):
self.generated_responses.append(_UpperCAmelCase )
def _lowerCamelCase ( self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
__a : Any = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
__a : str = '''user''' if is_user else '''bot'''
output += f"""{name} >> {text} \n"""
return output
@add_end_docstrings(
_UpperCamelCase , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
__a : List[Any] = self.tokenizer.eos_token
def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
__a : str = {}
__a : List[Any] = {}
__a : int = {}
if min_length_for_response is not None:
__a : Dict = min_length_for_response
if minimum_tokens is not None:
__a : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__a : Tuple = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__a : Tuple = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _UpperCAmelCase , _UpperCAmelCase=0 , **_UpperCAmelCase ):
__a : Optional[Any] = super().__call__(_UpperCAmelCase , num_workers=_UpperCAmelCase , **_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=32 ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError('''ConversationalPipeline expects Conversation objects as inputs''' )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
__a : Tuple = self.tokenizer._build_conversation_input_ids(_UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__a : List[str] = self._legacy_parse_and_tokenize(_UpperCAmelCase )
if self.framework == "pt":
__a : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__a : List[Any] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=10 , **_UpperCAmelCase ):
__a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
__a : Tuple = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
__a : str = max_length - minimum_tokens
__a : str = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__a : Any = model_inputs['''attention_mask'''][:, -trim:]
__a : Optional[Any] = model_inputs.pop('''conversation''' )
__a : Union[str, Any] = max_length
__a : Dict = self.model.generate(**_UpperCAmelCase , **_UpperCAmelCase )
if self.model.config.is_encoder_decoder:
__a : Optional[int] = 1
else:
__a : Tuple = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=True ):
__a : Dict = model_outputs['''output_ids''']
__a : Dict = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , )
__a : Optional[int] = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(_UpperCAmelCase )
return conversation
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Optional[Any] = self.tokenizer.eos_token_id
__a : int = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
if len(_UpperCAmelCase ) > self.tokenizer.model_max_length:
__a : int = input_ids[-self.tokenizer.model_max_length :]
return input_ids
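# Illustrative sketch (not part of the original file): the conversation state machine
# implemented above, using the standard (un-renamed) method names as an assumption.
class _DemoConversation:
    def __init__(self, text=None):
        self.past_user_inputs, self.generated_responses = [], []
        self.new_user_input = text
    def mark_processed(self):
        # move the pending user input into the history
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response):
        self.generated_responses.append(response)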
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ['''image_processor''']
__lowerCAmelCase = '''SamImageProcessor'''
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__a : Any = self.image_processor
__a : List[Any] = -10
__a : str = self.image_processor.size['''longest_edge''']
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__a : Tuple = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# pop arguments that are not used in the forward pass but are needed nevertheless
__a : Optional[Any] = encoding_image_processor['''original_sizes''']
if hasattr(_UpperCAmelCase , '''numpy''' ): # Checks if Torch or TF tensor
__a : Optional[Any] = original_sizes.numpy()
__a , __a , __a : int = self._check_and_preprocess_points(
input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , input_boxes=_UpperCAmelCase , )
__a : List[Any] = self._normalize_and_convert(
_UpperCAmelCase , _UpperCAmelCase , input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , input_boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , )
return encoding_image_processor
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="pt" , ):
if input_points is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
__a : Dict = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , original_sizes[0] ) for point in input_points
]
else:
__a : Dict = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , _UpperCAmelCase )
for point, original_size in zip(_UpperCAmelCase , _UpperCAmelCase )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__a , __a : Tuple = self._pad_points_and_labels(_UpperCAmelCase , _UpperCAmelCase )
__a : List[Any] = np.array(_UpperCAmelCase )
if input_labels is not None:
__a : List[Any] = np.array(_UpperCAmelCase )
if input_boxes is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
__a : Any = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , original_sizes[0] , is_bounding_box=_UpperCAmelCase )
for box in input_boxes
]
else:
__a : int = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , _UpperCAmelCase , is_bounding_box=_UpperCAmelCase )
for box, original_size in zip(_UpperCAmelCase , _UpperCAmelCase )
]
__a : Optional[int] = np.array(_UpperCAmelCase )
if input_boxes is not None:
if return_tensors == "pt":
__a : Any = torch.from_numpy(_UpperCAmelCase )
# boxes batch size of 1 by default
__a : str = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__a : Dict = tf.convert_to_tensor(_UpperCAmelCase )
# boxes batch size of 1 by default
__a : str = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__a : int = torch.from_numpy(_UpperCAmelCase )
# point batch size of 1 by default
__a : Optional[Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__a : List[Any] = tf.convert_to_tensor(_UpperCAmelCase )
# point batch size of 1 by default
__a : Optional[Any] = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__a : Any = torch.from_numpy(_UpperCAmelCase )
# point batch size of 1 by default
__a : Union[str, Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__a : str = tf.convert_to_tensor(_UpperCAmelCase )
# point batch size of 1 by default
__a : Dict = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = max([point.shape[0] for point in input_points] )
__a : Dict = []
for i, point in enumerate(_UpperCAmelCase ):
if point.shape[0] != expected_nb_points:
__a : Any = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__a : List[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_UpperCAmelCase )
__a : int = processed_input_points
return input_points, input_labels
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
__a , __a : str = original_size
__a , __a : Optional[int] = self.image_processor._get_preprocess_shape(_UpperCAmelCase , longest_edge=_UpperCAmelCase )
__a : List[str] = deepcopy(_UpperCAmelCase ).astype(_UpperCAmelCase )
if is_bounding_box:
__a : Optional[int] = coords.reshape(-1 , 2 , 2 )
__a : str = coords[..., 0] * (new_w / old_w)
__a : List[Any] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__a : List[Any] = coords.reshape(-1 , 4 )
return coords
def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
if input_points is not None:
if hasattr(_UpperCAmelCase , '''numpy''' ): # Checks for TF or Torch tensor
__a : str = input_points.numpy().tolist()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(input_points[0] , _UpperCAmelCase ):
raise ValueError('''Input points must be a list of lists of floating points.''' )
__a : str = [np.array(_UpperCAmelCase ) for input_point in input_points]
else:
__a : Optional[int] = None
if input_labels is not None:
if hasattr(_UpperCAmelCase , '''numpy''' ):
__a : Dict = input_labels.numpy().tolist()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(input_labels[0] , _UpperCAmelCase ):
raise ValueError('''Input labels must be a list of lists of integers.''' )
__a : Dict = [np.array(_UpperCAmelCase ) for label in input_labels]
else:
__a : Tuple = None
if input_boxes is not None:
if hasattr(_UpperCAmelCase , '''numpy''' ):
__a : List[Any] = input_boxes.numpy().tolist()
if (
not isinstance(_UpperCAmelCase , _UpperCAmelCase )
or not isinstance(input_boxes[0] , _UpperCAmelCase )
or not isinstance(input_boxes[0][0] , _UpperCAmelCase )
):
raise ValueError('''Input boxes must be a list of lists of lists of floating points.''' )
__a : Optional[Any] = [np.array(_UpperCAmelCase ).astype(np.floataa ) for box in input_boxes]
else:
__a : Union[str, Any] = None
return input_points, input_labels, input_boxes
@property
def _lowerCamelCase ( self ):
__a : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(_UpperCAmelCase ) )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.image_processor.post_process_masks(*_UpperCAmelCase , **_UpperCAmelCase )
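# Illustrative sketch (not part of the original file): the coordinate rescaling
# performed by the normalization method above, on a plain numpy array.
def _demo_normalize_coords(coords, original_size, new_size):
    import numpy as np
    old_h, old_w = original_size
    new_h, new_w = new_size
    out = np.asarray(coords, dtype=np.float64).copy()
    out[..., 0] = out[..., 0] * (new_w / old_w)  # x coordinates
    out[..., 1] = out[..., 1] * (new_h / old_h)  # y coordinates
    return out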
'''simple docstring'''
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _a ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Any = '''naver-clova-ix/donut-base-finetuned-docvqa'''
A : List[str] = (
'''This is a tool that answers a question about a document (pdf). It takes an input named `document` which '''
'''should be the document containing the information, as well as a `question` that is the question about the '''
'''document. It returns a text that contains the answer to the question.'''
)
A : Tuple = '''document_qa'''
A : Optional[Any] = AutoProcessor
A : List[str] = VisionEncoderDecoderModel
A : str = ['''image''', '''text''']
A : Optional[int] = ['''text''']
def __init__( self, *A, **A ):
'''simple docstring'''
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*A, **A )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
SCREAMING_SNAKE_CASE : Dict = task_prompt.replace('{user_input}', A )
SCREAMING_SNAKE_CASE : Any = self.pre_processor.tokenizer(
A, add_special_tokens=A, return_tensors='pt' ).input_ids
SCREAMING_SNAKE_CASE : List[str] = self.pre_processor(A, return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.model.generate(
inputs['pixel_values'].to(self.device ), decoder_input_ids=inputs['decoder_input_ids'].to(self.device ), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=A, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=A, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=A, ).sequences
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.pre_processor.batch_decode(A )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = sequence.replace(self.pre_processor.tokenizer.eos_token, '' )
SCREAMING_SNAKE_CASE : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.pad_token, '' )
SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(r'<.*?>', '', A, count=1 ).strip() # remove first task start token
SCREAMING_SNAKE_CASE : List[str] = self.pre_processor.tokenajson(A )
return sequence["answer"]
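# Illustrative sketch (not part of the original file): assembling the Donut DocVQA
# task prompt used by the tool above; the template string comes from the class itself.
def _demo_task_prompt(question):
    task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
    return task_prompt.replace("{user_input}", question)
# _demo_task_prompt("What is the total?")
# -> "<s_docvqa><s_question>What is the total?</s_question><s_answer>"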
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
UpperCamelCase_ = TypeVar("T")
class _a ( Generic[T] ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = data
SCREAMING_SNAKE_CASE : int = self
SCREAMING_SNAKE_CASE : Optional[Any] = 0
class _a ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : dict[T, DisjointSetTreeNode[T]] = {}
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = DisjointSetTreeNode(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.map[data]
if elem_ref != elem_ref.parent:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if nodea.rank > nodea.rank:
SCREAMING_SNAKE_CASE : Optional[int] = nodea
else:
SCREAMING_SNAKE_CASE : Optional[int] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
self.link(self.find_set(A ), self.find_set(A ) )
class _a ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : dict[T, dict[T, int]] = {}
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if node not in self.connections:
SCREAMING_SNAKE_CASE : Dict = {}
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
self.add_node(A )
self.add_node(A )
SCREAMING_SNAKE_CASE : Dict = weight
SCREAMING_SNAKE_CASE : Optional[int] = weight
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : int = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda x : x[2] )
# creating the disjoint set
SCREAMING_SNAKE_CASE : int = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(A )
# MST generation
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Dict = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = edges[index]
index += 1
SCREAMING_SNAKE_CASE : List[Any] = disjoint_set.find_set(A )
SCREAMING_SNAKE_CASE : Tuple = disjoint_set.find_set(A )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(A, A, A )
disjoint_set.union(A, A )
return graph
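# Illustrative sketch (not part of the original file): the same Kruskal procedure on a
# plain edge list, with a parent-dict union-find instead of the tree nodes above.
def _demo_kruskal(nodes, edges):
    parent = {n: n for n in nodes}
    def find(n):
        while parent[n] != n:
            parent[n] = parent[parent[n]]  # path halving
            n = parent[n]
        return n
    mst, total = [], 0
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        ru, rv = find(u), find(v)
        if ru != rv:  # edge joins two components: keep it
            parent[ru] = rv
            mst.append((u, v, w))
            total += w
    return mst, total
# _demo_kruskal(["a", "b", "c"], [("a", "b", 1), ("b", "c", 2), ("a", "c", 3)])
# -> ([("a", "b", 1), ("b", "c", 2)], 3)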
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase )
def __call__( self ):
"""simple docstring"""
image =torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), )
timestep =1
model_output =self.unet(image, timestep ).sample
scheduler_output =self.scheduler.step(model_output, timestep, image ).prev_sample
# the arithmetic cancels, so this toy pipeline deterministically returns ones
result =scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
return result
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=0.1 , lowercase__=0.1 , lowercase__=224 , lowercase__=1_000 , lowercase__=[3, 3, 6, 4] , lowercase__=[48, 56, 112, 220] , ) -> int:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = num_labels
__UpperCAmelCase = image_size
__UpperCAmelCase = layer_depths
__UpperCAmelCase = embed_dims
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ (self ) -> Optional[Any]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int:
__UpperCAmelCase = SwiftFormerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ (self ) -> Optional[int]:
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = self.prepare_config_and_inputs()
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
a__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = SwiftFormerModelTester(self )
__UpperCAmelCase = ConfigTester(
self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCAmelCase_ (self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = 8
self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
def _config_zero_init(lowercase__ ):
__UpperCAmelCase = copy.deepcopy(lowercase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase__ , lowercase__ , 1E-10 )
if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ):
__UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) )
setattr(lowercase__ , lowercase__ , lowercase__ )
return configs_no_init
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = _config_zero_init(lowercase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def __a ( ) -> Any:
'''simple docstring'''
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ (self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowercase__ )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
# verify the logits
__UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__UpperCAmelCase = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
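# Illustrative sketch (not part of the original tests): the hidden-state spatial sizes
# asserted in the hidden-states test above, for the default image_size=224 and 8 stages.
def _demo_swiftformer_spatial_sizes(image_size=224, num_stages=8):
    # width/height start at image_size // 4 and halve after every two stages
    return [(image_size // 4) // 2 ** (i // 2) for i in range(num_stages)]
# _demo_swiftformer_spatial_sizes() -> [56, 56, 28, 28, 14, 14, 7, 7]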
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowercase ( __UpperCamelCase):
"""simple docstring"""
a__ : Optional[int] = "mobilenet_v1"
def __init__( self : Tuple , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : Dict=224 , __UpperCAmelCase : List[Any]=1.0 , __UpperCAmelCase : List[Any]=8 , __UpperCAmelCase : List[Any]="relu6" , __UpperCAmelCase : str=True , __UpperCAmelCase : Optional[Any]=0.999 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : Tuple=0.001 , **__UpperCAmelCase : Tuple , ) -> List[Any]:
super().__init__(**__UpperCAmelCase )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
UpperCAmelCase_= num_channels
UpperCAmelCase_= image_size
UpperCAmelCase_= depth_multiplier
UpperCAmelCase_= min_depth
UpperCAmelCase_= hidden_act
UpperCAmelCase_= tf_padding
UpperCAmelCase_= classifier_dropout_prob
UpperCAmelCase_= initializer_range
UpperCAmelCase_= layer_norm_eps
class lowercase ( __UpperCamelCase):
"""simple docstring"""
a__ : Union[str, Any] = version.parse("1.11")
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float:
return 1E-4
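# Illustrative sketch (not part of this config file): the channel rounding that
# depth_multiplier and min_depth are typically combined with in the MobileNet
# reference implementations. This is an assumption, not code from this class.
def _demo_scaled_channels(channels, depth_multiplier=1.0, min_depth=8, divisor=8):
    value = channels * depth_multiplier
    new_value = max(min_depth, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round down by more than 10%
        new_value += divisor
    return new_value
# _demo_scaled_channels(64, depth_multiplier=0.75) -> 48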
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( snake_case__):
"""simple docstring"""
def __init__( self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ) -> List[str]:
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCAmelCase_= DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self : Union[str, Any] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : int = 50 , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __UpperCAmelCase ):
UpperCAmelCase_= (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
UpperCAmelCase_= (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__UpperCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase_= randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase_= self.scheduler.step(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , eta=__UpperCAmelCase , use_clipped_model_output=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
UpperCAmelCase_= (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_= self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
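# Illustrative sketch (not part of the original file): the final denormalization step
# above, mapping model outputs in [-1, 1] to image values in [0, 1].
def _demo_denormalize():
    import torch
    sample = torch.tensor([-1.0, 0.0, 1.0, 2.0])
    return (sample / 2 + 0.5).clamp(0, 1)  # tensor([0.0, 0.5, 1.0, 1.0])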
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Tuple = "roc_bert"
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1e-12 , __A=True , __A=0 , __A="absolute" , __A=None , __A=True , __A=True , __A=768 , __A=910 , __A=512 , __A=2_4858 , __A=True , **__A , ):
"""simple docstring"""
lowerCamelCase : str = vocab_size
lowerCamelCase : Optional[Any] = max_position_embeddings
lowerCamelCase : Optional[int] = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : Any = num_attention_heads
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : List[str] = attention_probs_dropout_prob
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : int = type_vocab_size
lowerCamelCase : Union[str, Any] = layer_norm_eps
lowerCamelCase : List[str] = use_cache
lowerCamelCase : Optional[int] = enable_pronunciation
lowerCamelCase : str = enable_shape
lowerCamelCase : Union[str, Any] = pronunciation_embed_dim
lowerCamelCase : Tuple = pronunciation_vocab_size
lowerCamelCase : Optional[int] = shape_embed_dim
lowerCamelCase : List[str] = shape_vocab_size
lowerCamelCase : List[str] = concat_input
lowerCamelCase : Dict = position_embedding_type
lowerCamelCase : Optional[Any] = classifier_dropout
super().__init__(pad_token_id=__A , **__A )
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __init__( self , *__A , **__A ):
"""simple docstring"""
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , __A , )
super().__init__(*__A , **__A )
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_a : Optional[int]= logging.get_logger(__name__)
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Any = ["""input_features"""]
def __init__(self : int , _A : Any=80 , _A : Any=1_60_00 , _A : Union[str, Any]=1_60 , _A : int=30 , _A : str=4_00 , _A : Any=0.0 , _A : Optional[int]=False , **_A : List[str] , ) -> Optional[int]:
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
__snake_case : List[str] = n_fft
__snake_case : str = hop_length
__snake_case : Optional[Any] = chunk_length
__snake_case : int = chunk_length * sampling_rate
__snake_case : List[str] = self.n_samples // hop_length
__snake_case : Any = sampling_rate
__snake_case : Optional[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='slaney' , mel_scale='slaney' , )
def _lowercase (self : str , _A : np.array) -> np.ndarray:
__snake_case : Optional[int] = spectrogram(
_A , window_function(self.n_fft , 'hann') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
__snake_case : List[str] = log_spec[:, :-1]
__snake_case : str = np.maximum(_A , log_spec.max() - 8.0)
__snake_case : Any = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase (_A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0) -> List[np.ndarray]:
if attention_mask is not None:
__snake_case : Optional[Any] = np.array(_A , np.intaa)
__snake_case : Tuple = []
for vector, length in zip(_A , attention_mask.sum(-1)):
__snake_case : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
if length < normed_slice.shape[0]:
__snake_case : List[Any] = padding_value
normed_input_values.append(_A)
else:
__snake_case : int = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
return normed_input_values
def __call__(self : Any , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : Any , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
__snake_case : Dict = isinstance(_A , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
__snake_case : Dict = is_batched_numpy or (
isinstance(_A , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__snake_case : List[str] = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray):
__snake_case : Tuple = np.asarray(_A , dtype=np.floataa)
elif isinstance(_A , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__snake_case : str = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__snake_case : int = [np.asarray([raw_speech]).T]
__snake_case : Any = BatchFeature({'input_features': raw_speech})
# convert into correct format for padding
__snake_case : Any = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__snake_case : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
__snake_case : Optional[Any] = np.stack(padded_inputs['input_features'] , axis=0)
# make sure list is in array format
__snake_case : Union[str, Any] = padded_inputs.get('input_features').transpose(2 , 0 , 1)
__snake_case : List[Any] = [self._np_extract_fbank_features(_A) for waveform in input_features[0]]
if isinstance(input_features[0] , _A):
__snake_case : Union[str, Any] = [np.asarray(_A , dtype=np.floataa) for feature in input_features]
else:
__snake_case : Optional[int] = input_features
if return_attention_mask:
# rescale the attention mask from sample resolution (480000) to feature resolution (3000)
__snake_case : Union[str, Any] = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
__snake_case : Dict = padded_inputs.convert_to_tensors(_A)
return padded_inputs
def _lowercase (self : Optional[int]) -> Dict[str, Any]:
__snake_case : List[Any] = copy.deepcopy(self.__dict__)
__snake_case : Union[str, Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
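# Illustrative sketch (not part of the original file): the chunk bookkeeping done in
# __init__ above, with Whisper's default hyperparameters.
def _demo_whisper_chunking(sampling_rate=16000, hop_length=160, chunk_length=30):
    n_samples = chunk_length * sampling_rate  # 480000 samples per 30 s chunk
    max_frames = n_samples // hop_length      # 3000 mel frames per chunk
    return n_samples, max_frames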
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_a : Optional[int]= logging.get_logger()
@dataclass
class UpperCamelCase :
UpperCAmelCase : nn.Module
UpperCAmelCase : List[nn.Module] = field(default_factory=lowercase )
UpperCAmelCase : list = field(default_factory=lowercase )
def _lowercase (self : str , _A : Optional[Any] , _A : Tensor , _A : Tensor) -> Any:
__snake_case : str = len(list(m.modules())) == 1 or isinstance(_A , nn.Convad) or isinstance(_A , nn.BatchNormad)
if has_not_submodules:
self.traced.append(_A)
def __call__(self : Dict , _A : Tensor) -> Optional[Any]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(_A)
[x.remove() for x in self.handles]
return self
@property
def _lowercase (self : Union[str, Any]) -> List[str]:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda _A: len(list(_A.state_dict().keys())) > 0 , self.traced))
@dataclass
class UpperCamelCase :
UpperCAmelCase : nn.Module
UpperCAmelCase : nn.Module
UpperCAmelCase : int = 0
UpperCAmelCase : List = field(default_factory=lowercase )
UpperCAmelCase : List = field(default_factory=lowercase )
def __call__(self : List[str] , _A : Tensor) -> List[Any]:
__snake_case : Any = Tracker(self.dest)(_A).parametrized
__snake_case : int = Tracker(self.src)(_A).parametrized
__snake_case : List[Any] = list(filter(lambda _A: type(_A) not in self.src_skip , _A))
__snake_case : Any = list(filter(lambda _A: type(_A) not in self.dest_skip , _A))
if len(_A) != len(_A):
raise Exception(
f"Numbers of operations are different. Source module has {len(_A)} operations while"
f" destination module has {len(_A)}.")
for dest_m, src_m in zip(_A , _A):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(f"Transferred from={src_m} to={dest_m}")
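# Illustrative sketch (not part of the original file): the forward-hook tracing pattern
# used by the Tracker class above, reduced to its core.
def _demo_trace_leaf_modules(model, x):
    traced, handles = [], []
    def hook(module, inputs, output):
        traced.append(module)
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf modules only
            handles.append(m.register_forward_hook(hook))
    model(x)  # hooks fire in execution order
    for h in handles:
        h.remove()
    return traced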
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
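# Example invocation (illustrative; the script filename below is a placeholder for wherever
# this file is saved, and the output directory is made up):
#
#   python convert_timm_resnet_to_hf.py --model_name resnet50 --pytorch_dump_folder_path ./converted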
| 95
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 137
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass


def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
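# A minimal usage sketch (illustrative; the dicts below are made-up examples, not real data):
if __name__ == "__main__":
    expected = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    recorded = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    verify_checksums(expected, recorded, verification_name="dataset files")  # logs success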
| 137
| 1
|
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, noise).prev_sample

        # the output is deliberately constant: a tensor of ones with the sample's shape
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
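# Illustrative usage (assumes diffusers' UNet2DModel and DDPMScheduler; not part of the original file):
#
#     from diffusers import DDPMScheduler, UNet2DModel
#     pipe = CustomPipeline(unet=UNet2DModel(sample_size=8, in_channels=3, out_channels=3),
#                           scheduler=DDPMScheduler())
#     out = pipe()  # a tensor of ones with the latent sample's shape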
| 261
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 261
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 77
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serializes this instance, replacing any nested `GenerationConfig` by a plain dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
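# Illustrative usage (argument values are made up, not from the original module):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=64,
#         generation_num_beams=4,
#     )
#     d = args.to_dict()  # any nested GenerationConfig is serialized to a plain dict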
| 35
| 0
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )
    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
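# Example invocation (illustrative; the script filename, run id and token are placeholders):
#
#   python extract_warnings.py --workflow_run_id 123456789 --output_dir ./artifacts --token <GITHUB_TOKEN>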
| 125
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 125
| 1
|
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 47
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 235
| 0
|
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num!, raising ValueError for negative input."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
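# Quick sanity checks (illustrative):
#
#     >>> factorial(0)
#     1
#     >>> factorial(5)
#     120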
| 362
|
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
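# Example invocation (illustrative; the script filename and output file names are placeholders):
#
#   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set nq.questions --gold_data_path nq.gold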
| 328
| 0
|
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every combination of words from `word_bank` that concatenates to `target`."""
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 22
|
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times `term` occurs (case-insensitively) in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Compute log10(n / df), optionally with add-one smoothing of the denominator."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
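# Worked example (illustrative numbers, not from the original module):
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog ran\nthe cat ran"
    tf = term_frequency("cat", "the cat sat with another cat")  # -> 2
    df, n = document_frequency("cat", corpus)                   # -> (2, 3)
    idf = inverse_document_frequency(df, n)                     # log10(3 / 2) ~= 0.176
    print(tf_idf(tf, idf))                                      # ~ 0.352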
| 22
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def __A ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __A ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def __A ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 16
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
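# Illustrative wiring (assumes accelerate's `Accelerator`; not part of the original file):
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)
#     model = RegressionModel(a=1, b=2)
#     model, train_dl, eval_dl = accelerator.prepare(model, train_dl, eval_dl)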
| 16
| 1
|
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled using tiles of length 2, 3 and 4."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
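# Quick sanity checks on small cases of the recurrence (illustrative):
#
#     >>> solution(2)   # empty row or a single length-2 tile
#     2
#     >>> solution(3)   # empty, a 2-tile at two offsets, or one 3-tile
#     4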
| 5
|
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 47
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDM3DPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDM3DPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_1, depth_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_2, depth_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d')
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d').to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_4c(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c').to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
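

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the test suite; assumes a CUDA device and
# access to the Intel/ldm3d checkpoint) showing how the pipeline under test is
# driven to produce a joint RGB + depth output:
#
#     import torch
#     from diffusers import StableDiffusionLDMaDPipeline
#
#     pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d').to('cuda')
#     output = pipe('a photograph of an astronaut riding a horse',
#                   num_inference_steps=50, output_type='numpy')
#     rgb, depth = output.rgb, output.depth  # (1, 512, 512, 3) and (1, 512, 512)
# ---------------------------------------------------------------------------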
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '\n')

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '')

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '\n')

        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger('transformers.models.bart.tokenization_bart')

        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}',
        )

        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)

        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '')

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '\n')
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
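

# ---------------------------------------------------------------------------
# Minimal usage sketch (assumes a standard `transformers` install) of the two
# knobs exercised above: library-wide verbosity and the download progress bars.
#
#     from transformers import logging
#     from transformers.utils.logging import disable_progress_bar
#
#     logging.set_verbosity_error()  # only ERROR and above from transformers.* loggers
#     disable_progress_bar()         # suppress tqdm bars, e.g. during model downloads
# ---------------------------------------------------------------------------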
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
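

# ---------------------------------------------------------------------------
# Quick sketch (assumes `transformers` is installed): instantiating the config
# above with integer-only quantization enabled; unset fields keep the
# RoBERTa-base defaults declared in __init__.
#
#     from transformers import IBertConfig
#
#     config = IBertConfig(quant_mode=True)
#     assert config.model_type == "ibert" and config.quant_mode
# ---------------------------------------------------------------------------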
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Divide-and-conquer maximum of nums[left:right + 1].

    >>> find_max([3, 2, 1], 0, 2)
    3
    >>> find_max([-3, -2, -1], 0, 2)
    -1
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
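

# Recursion trace for find_max([1, 5, 2, 4], 0, 3) (illustrative, not executed):
#   mid = 1 -> find_max(nums, 0, 1) = max(1, 5) = 5
#              find_max(nums, 2, 3) = max(2, 4) = 4
#   result: max(5, 4) = 5. Each element is inspected exactly once, so the
#   divide-and-conquer costs O(n) comparisons with O(log n) recursion depth.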
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly collinear: fall back to plain lerp
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
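

# Illustrative note on slerp (not executed): it interpolates along the great
# circle between two vectors and preserves the endpoints:
#
#     a, b = torch.randn(768), torch.randn(768)
#     slerp(0.0, a, b)  # ~= a
#     slerp(1.0, a, b)  # ~= b
#     slerp(0.5, a, b)  # halfway along the arc between a and b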
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # add noise to the clean latents for the given timestep
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
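

# ---------------------------------------------------------------------------
# Minimal usage sketch (assumes Stable Diffusion component weights plus an
# open_clip CoCa captioner are available locally; `style_img` / `content_img`
# are PIL images; argument names follow the __call__ signature above):
#
#     pipe = CLIPGuidedImagesMixingStableDiffusion(
#         vae=vae, text_encoder=text_encoder, clip_model=clip_model,
#         tokenizer=tokenizer, unet=unet, scheduler=scheduler,
#         feature_extractor=feature_extractor, coca_model=coca_model,
#         coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
#     ).to('cuda')
#     output = pipe(style_image=style_img, content_image=content_img,
#                   num_inference_steps=50, clip_guidance_scale=100)
#     output.images[0].save('mixed.png')
# ---------------------------------------------------------------------------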
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
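

# ---------------------------------------------------------------------------
# Quick sketch (assumes `transformers` is installed): the defaults above
# describe LeViT-128S; larger variants mainly widen the stages. The override
# values below are illustrative, not an official preset.
#
#     from transformers import LevitConfig
#
#     config = LevitConfig(hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12])
#     assert config.model_type == "levit"
# ---------------------------------------------------------------------------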