| code (string, length 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, length 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Return the minimum number of moves so that every node of the binary tree
    holds exactly one coin (one coin may be moved along one edge per move).
    """
    if root is None:
        return 0

    # Validation: the tree must hold exactly as many coins as it has nodes.
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be the same as the number of coins")

    # Main calculation: a post-order traversal returning, for each subtree,
    # the moves performed so far and the subtree's excess coins.
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
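# Illustrative usage (an addition for clarity, not part of the original file):
# in the tree [0, 3, 0] the left child sends one coin to the root and two coins
# through the root to the right child, for a total of three moves.
if __name__ == "__main__":
    example_root = TreeNode(0, TreeNode(3), TreeNode(0))
    assert distribute_coins(example_root) == 3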
| 326 |
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
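# Behaviour sketch (illustrative addition): `map_nested` applies a function to
# every leaf of an arbitrarily nested list/dict structure, so, assuming the
# definitions above,
#     map_nested(add_one, {"a": 1, "b": [2, 3]})
# would yield {"a": 2, "b": [3, 4]}.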
| 326 | 1 |
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
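# Illustrative usage sketch (an addition; the data directory is a placeholder):
#     task = NER(label_idx=-1)
#     examples = task.read_examples_from_file("path/to/conll2003", Split.train)
#     labels = task.get_labels(path=None)  # falls back to the default label set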
| 293 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : List[Any] , A : AutoencoderKL , A : CLIPTextModel , A : CLIPTokenizer , A : UNetaDConditionModel , A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A : StableDiffusionSafetyChecker , A : CLIPImageProcessor , ):
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ):
if isinstance(A , A ):
__snake_case: int = 1
elif isinstance(A , A ):
__snake_case: Optional[Any] = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
__snake_case: Tuple = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case: Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape
__snake_case: Tuple = text_embeddings.repeat(1 , A , 1 )
__snake_case: Dict = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case: List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case: List[str]
if negative_prompt is None:
__snake_case: Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
__snake_case: List[str] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case: str = negative_prompt
__snake_case: Any = text_input_ids.shape[-1]
__snake_case: Dict = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
__snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case: Optional[Any] = uncond_embeddings.shape[1]
__snake_case: str = uncond_embeddings.repeat(A , A , 1 )
__snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case: Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case: Any = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
__snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
__snake_case: Dict = torch.randn(
A , generator=A , device=self.device , dtype=A )
__snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case: Optional[int] = latents_reference.to(self.device )
__snake_case: List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case: List[Any] = 0 if dx < 0 else dx
__snake_case: Dict = 0 if dy < 0 else dy
__snake_case: List[str] = max(-dx , 0 )
__snake_case: int = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case: str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case: int = {}
if accepts_eta:
__snake_case: Optional[Any] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
__snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case: Dict = self.scheduler.scale_model_input(A , A )
# predict the noise residual
__snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case: Any = noise_pred.chunk(2 )
__snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
__snake_case: Optional[int] = 1 / 0.1_8215 * latents
__snake_case: List[Any] = self.vae.decode(A ).sample
__snake_case: str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
__snake_case , __snake_case: List[str] = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case: Optional[int] = None
if output_type == "pil":
__snake_case: Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 293 | 1 |
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
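# Worked example (illustrative addition): with repo_id="org-name/dataset-name",
# path="filename with blanks.csv" and revision=None, the expected URL is
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# (`quote` percent-encodes the blank in the filename).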
| 250 |
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 250 | 1 |
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
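# Example invocation (illustrative addition; all paths are placeholders):
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./mobilebert/model.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./mobilebert/pytorch_model.bin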
| 369 |
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 121 | 0 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Dynamically pads received inputs and prepares masked indices for self-supervised pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that counts update steps and decays the gumbel softmax temperature.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 29 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 21 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-liner variant of manhattan_distance with the same validation."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
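# Worked example (illustrative addition, assuming the definitions above):
#     manhattan_distance([1, 1], [9, 9])            # -> 16.0  (|1-9| + |1-9|)
#     manhattan_distance_one_liner([1, 1], [9, 9])  # -> 16.0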
| 3 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split dataset into features and target; `data` is the scikit-learn bunch.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
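# Behaviour sketch (illustrative addition): data_handling simply splits the
# input mapping into (features, targets), e.g.
#     data_handling({"data": "[5.1, 3.5, 1.4, 0.2]", "target": [0]})
# returns ('[5.1, 3.5, 1.4, 0.2]', [0]).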
| 3 | 1 |
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_tf_available():
    import tensorflow as tf


logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Sanitize deprecated `no_*` arguments before delegating to BenchmarkArguments.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
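# Illustrative usage sketch (an addition; the argument names mirror the
# BenchmarkArguments fields and are assumptions here):
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32, 128]
#     )
#     strategy = args.strategy  # picks a TPU / single-GPU / CPU distribution strategy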
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
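# Illustrative usage sketch (an addition; mirrors the usual transformers config
# pattern rather than anything in this file):
#     from transformers import TrOCRConfig, TrOCRForCausalLM
#     configuration = TrOCRConfig()            # defaults similar to microsoft/trocr-base
#     model = TrOCRForCausalLM(configuration)  # decoder initialized from the config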
| 63 | 1 |
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between the energy units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
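# Worked example (illustrative addition): converting 1000 joule to kilojoule
# multiplies by 1.0 and divides by 1000, so
#     energy_conversion("joule", "kilojoule", 1000)  # -> 1.0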
| 265 |
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of a float, or its derivative if deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
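# Illustrative behaviour (an addition, not from the original file): with enough
# propagations the single trained weight drives the output toward the expected
# value, e.g. forward_propagation(32, 450_000) typically lands between 31 and 33
# (the exact result varies because the initial weight is random).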
| 265 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class __lowerCAmelCase :
def __init__( self , parent , batch_size=1_3 , image_size=6_4 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=1_6 , hidden_sizes=[1_2_8, 2_5_6, 3_8_4] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[1_6, 1_6, 1_6] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ) -> int:
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.hidden_sizes = hidden_sizes
self.num_attention_heads = num_attention_heads
self.depths = depths
self.key_dim = key_dim
self.drop_path_rate = drop_path_rate
self.patch_size = patch_size
self.attention_ratio = attention_ratio
self.mlp_ratio = mlp_ratio
self.initializer_range = initializer_range
self.down_ops = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
self.is_training = is_training
self.use_labels = use_labels
self.num_labels = num_labels
self.initializer_range = initializer_range
def prepare_config_and_inputs( self ) -> Any:
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ) -> int:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def create_and_check_model( self , config , pixel_values , labels ) -> int:
'''simple docstring'''
model = LevitModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
image_size = (self.image_size, self.image_size)
height , width = image_size[0], image_size[1]
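# The patch embedding stem is four stride-2 convolutions, so the loop below applies
# the conv output-size formula four times; the additional ceil(height / 4) * ceil(width / 4)
# in the final shape check accounts for the two stride-2 "Subsample" stages listed in down_ops.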
for _ in range(4 ):
height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> str:
'''simple docstring'''
config.num_labels = self.num_labels
model = LevitForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ) -> Dict:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
all_model_classes = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_lowercase : str = False
_lowercase : Dict = False
_lowercase : Tuple = False
_lowercase : Dict = False
_lowercase : Optional[int] = False
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
self.model_tester = LevitModelTester(self )
self.config_tester = ConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=3_7 )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _lowercase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = len(self.model_tester.depths ) + 1
self.assertEqual(len(hidden_states ) , expected_num_layers )
image_size = (self.model_tester.image_size, self.model_tester.image_size)
height , width = image_size[0], image_size[1]
for _ in range(4 ):
height = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
width = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Tuple:
'''simple docstring'''
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def _lowercase ( self ) -> str:
'''simple docstring'''
if not self.model_tester.is_training:
return
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(MODEL_MAPPING )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
model = model_class(config )
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def _lowercase ( self ) -> Any:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
config.use_cache = False
config.return_dict = True
for model_class in self.all_model_classes:
if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
model = model_class(config )
model.gradient_checkpointing_enable()
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def _lowercase ( self ) -> Any:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
problem_types = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
config.problem_type = problem_type["title"]
config.num_labels = problem_type["num_labels"]
model = model_class(config )
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
if problem_type["num_labels"] > 1:
inputs["labels"] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
inputs["labels"] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=True ) as warning_list:
loss = model(**inputs ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LevitModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def _A ( ):
"""simple docstring"""
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
@cached_property
def _lowercase ( self ) -> int:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowercase ( self ) -> str:
'''simple docstring'''
model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 95 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_convbert_fast"""] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_convbert"""] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_convbert"""] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
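# A minimal usage sketch (module path assumed): with the _LazyModule in place,
# importing the package is cheap, and a heavy backend such as torch is only
# pulled in on first attribute access, e.g.
# from transformers.models import convbert   # no torch import yet
# model_cls = convbert.ConvBertModel         # this line triggers the deferred import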
| 95 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : str = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class UpperCamelCase ( PretrainedConfig ):
"""simple docstring"""
A : Any = "lilt"
def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1_0_2_4 , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.classifier_dropout = classifier_dropout
self.channel_shrink_ratio = channel_shrink_ratio
self.max_2d_position_embeddings = max_2d_position_embeddings
| 364 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( PretrainedConfig ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.pruning_method = pruning_method
self.mask_init = mask_init
self.mask_scale = mask_scale
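# Minimal construction sketch (class name assumed from model_type; this config drives
# the movement-pruning research example, where pruning_method="topK" keeps only the
# highest-scoring weights and mask_init/mask_scale set the starting mask scores):
# config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)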
| 345 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : str = {}
class __snake_case ( PretrainedConfig ):
model_type = '''llama'''
keys_to_ignore_at_inference = ['''past_key_values''']
def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.0_2 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
"""simple docstring"""
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
def _rope_scaling_validation( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"""got {self.rope_scaling}""")
rope_scaling_type = self.rope_scaling.get('''type''' , None)
rope_scaling_factor = self.rope_scaling.get('''factor''' , None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""")
| 51 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase__ : Tuple = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ : List[Any] = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
bs = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
cs = bs[:]
n = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
cs = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
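# The resulting table maps every byte 0-255 to a printable unicode codepoint: bytes
# that already print cleanly keep their own codepoint, while the remaining control and
# whitespace bytes are shifted up to 256 + n, so BPE merges never touch raw control bytes.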
def get_pairs(word ):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
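# Example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}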
class _UpperCamelCase ( PreTrainedTokenizer ):
'''simple docstring'''
_A : Union[str, Any] = VOCAB_FILES_NAMES
_A : Any = PRETRAINED_VOCAB_FILES_MAP
_A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
"""simple docstring"""
bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
super().__init__(
errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
self.encoder = json.load(vocab_handle )
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file , encoding="""utf-8""" ) as merges_handle:
bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
self.cache = {}
self.add_prefix_space = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def vocab_size( self ):
"""simple docstring"""
return len(self.encoder )
def get_vocab( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def bpe( self , token ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
word = tuple(token )
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = """ """.join(word )
self.cache[token] = word
return word
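# Sketch of a single call, assuming the merges file ranks ("l", "o") before ("lo", "w"):
# self.bpe("low") first merges to ("lo", "w"), then to ("low",), and returns the
# space-joined result "low", caching it for subsequent calls.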
def _tokenize( self , text ):
"""simple docstring"""
bpe_tokens = []
for token in re.findall(self.pat , text ):
token = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
return bpe_tokens
def _convert_token_to_id( self , token ):
"""simple docstring"""
return self.encoder.get(token , self.encoder.get(self.unk_token ) )
def _convert_id_to_token( self , index ):
"""simple docstring"""
return self.decoder.get(index )
def convert_tokens_to_string( self , tokens ):
"""simple docstring"""
text = """""".join(tokens )
text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
vocab_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
merge_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
index = 0
with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
index = token_index
writer.write(""" """.join(bpe_tokens ) + """\n""" )
index += 1
return vocab_file, merge_file
def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
"""simple docstring"""
add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
text = """ """ + text
return (text, kwargs)
def _pad( self , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ):
"""simple docstring"""
encoded_inputs = super()._pad(
encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
required_input = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""] ) != len(required_input )
if needs_to_be_padded:
difference = len(required_input ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__SCREAMING_SNAKE_CASE : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 112 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , parent , ):
"""simple docstring"""
self.parent = parent
self.batch_size = 1_3
self.seq_length = 7
self.mem_len = 3_0
self.key_length = self.seq_length + self.mem_len
self.clamp_len = 1_5
self.is_training = True
self.use_labels = True
self.vocab_size = 9_9
self.cutoffs = [1_0, 5_0, 8_0]
self.hidden_size = 3_2
self.d_embed = 3_2
self.num_attention_heads = 4
self.d_head = 8
self.d_inner = 1_2_8
self.div_val = 2
self.num_hidden_layers = 2
self.scope = None
self.seed = 1
self.eos_token_id = 0
self.num_labels = 3
self.pad_token_id = self.vocab_size - 1
self.init_range = 0.0_1
def prepare_config_and_inputs( self ):
"""simple docstring"""
input_ids_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_ids_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def set_seed( self ):
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def A ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = TFTransfoXLModel(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = model(UpperCamelCase__ ).to_tuple()
UpperCamelCase = {'input_ids': input_ids_a, 'mems': mems_a}
UpperCamelCase , UpperCamelCase = model(UpperCamelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
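# Transfo-XL keeps one cached memory tensor per layer with shape
# (mem_len, batch_size, hidden_size); the assertions above check that the cache keeps
# this shape both when mems are passed in and when they are freshly initialised.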
def A ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
"""simple docstring"""
UpperCamelCase = TFTransfoXLLMHeadModel(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = model(UpperCamelCase__ ).to_tuple()
UpperCamelCase = {'input_ids': input_ids_a, 'labels': lm_labels}
UpperCamelCase , UpperCamelCase = model(UpperCamelCase__ ).to_tuple()
UpperCamelCase , UpperCamelCase = model([input_ids_a, mems_a] ).to_tuple()
UpperCamelCase = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
UpperCamelCase , UpperCamelCase = model(UpperCamelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def A ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = TFTransfoXLForSequenceClassification(UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
((config) , (input_ids_a) , (input_ids_a) , (lm_labels)) = config_and_inputs
inputs_dict = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
all_generative_model_classes = () if is_tf_available() else ()
pipeline_model_mapping = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def A ( self : Dict ):
"""simple docstring"""
self.model_tester = TFTransfoXLModelTester(self )
self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=3_7 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : str ):
"""simple docstring"""
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
def A ( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
def A ( self : str ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
model = model_class(config )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
x = model.get_output_embeddings()
assert isinstance(x , tf.keras.layers.Layer )
name = model.get_bias()
assert name is None
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def A ( self : str ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = TFTransfoXLModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def A ( self : List[str] ):
"""simple docstring"""
pass
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def A ( self : Tuple ):
"""simple docstring"""
model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
UpperCamelCase = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCamelCase = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
output_ids = model.generate(UpperCamelCase__ , max_length=2_0_0 , do_sample=False )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase__ )
| 364 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def A ( self : Union[str, Any] ):
"""simple docstring"""
text_generator = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
outputs = text_generator('This is a test' , do_sample=False )
self.assertEqual(
outputs , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
UpperCamelCase = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
UpperCamelCase__ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
UpperCamelCase = text_generator('This is a test' , do_sample=UpperCamelCase__ , num_return_sequences=2 , return_tensors=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{'generated_token_ids': ANY(UpperCamelCase__ )},
{'generated_token_ids': ANY(UpperCamelCase__ )},
] , )
UpperCamelCase = text_generator.model.config.eos_token_id
UpperCamelCase = '<pad>'
UpperCamelCase = text_generator(
['This is a test', 'This is a second test'] , do_sample=UpperCamelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase__ , )
self.assertEqual(
UpperCamelCase__ , [
[
{'generated_token_ids': ANY(UpperCamelCase__ )},
{'generated_token_ids': ANY(UpperCamelCase__ )},
],
[
{'generated_token_ids': ANY(UpperCamelCase__ )},
{'generated_token_ids': ANY(UpperCamelCase__ )},
],
] , )
@require_tf
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
UpperCamelCase = text_generator('This is a test' , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
UpperCamelCase = text_generator(['This is a test', 'This is a second test'] , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def A ( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
UpperCamelCase = TextGenerationPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
return text_generator, ["This is a test", "Another test"]
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = 'Hello I believe in'
UpperCamelCase = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
UpperCamelCase = text_generator(UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
UpperCamelCase = text_generator(UpperCamelCase__ , stop_sequence=' fe' )
self.assertEqual(UpperCamelCase__ , [{'generated_text': 'Hello I believe in fe'}] )
def A ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
model = text_generator.model
tokenizer = text_generator.tokenizer
outputs = text_generator('This is a test' )
self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
outputs = text_generator('This is a test' , return_full_text=False )
self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
text_generator = pipeline(task='text-generation' , model=model , tokenizer=tokenizer , return_full_text=False )
outputs = text_generator('This is a test' )
self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
outputs = text_generator('This is a test' , return_full_text=True )
self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
UpperCamelCase = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCamelCase = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
] , )
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = text_generator('test' , return_full_text=UpperCamelCase__ , return_text=UpperCamelCase__ )
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = text_generator('test' , return_full_text=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = text_generator('test' , return_text=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCamelCase = text_generator('' )
self.assertEqual(UpperCamelCase__ , [{'generated_text': ANY(UpperCamelCase__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCamelCase = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 5_0_0 , max_new_tokens=2_0 )
UpperCamelCase = text_generator('This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=2_0 )
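# The "hole" strategy truncates the prompt from the left so that it still fits the
# model context together with max_new_tokens; it cannot help once max_new_tokens alone
# exceeds model_max_length, which is exactly what the next assertion exercises.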
# Hole strategy cannot work
with self.assertRaises(UpperCamelCase__ ):
text_generator(
'This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def A ( self : int ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
UpperCamelCase = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCamelCase = pipe('This is a test' )
self.assertEqual(
UpperCamelCase__ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCamelCase = pipe('This is a test' )
self.assertEqual(
UpperCamelCase__ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
UpperCamelCase = pipe('This is a test' )
self.assertEqual(
UpperCamelCase__ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def A ( self : int ):
"""simple docstring"""
import torch
UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def A ( self : Any ):
"""simple docstring"""
import torch
UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=UpperCamelCase__ , top_p=0.5 )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = 'Hello world'
UpperCamelCase = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
UpperCamelCase = logging.get_logger('transformers.generation.tf_utils' )
else:
UpperCamelCase = logging.get_logger('transformers.generation.utils' )
UpperCamelCase = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(UpperCamelCase__ ) as cl:
UpperCamelCase = text_generator(UpperCamelCase__ , max_length=1_0 , max_new_tokens=1 )
self.assertIn(UpperCamelCase__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(UpperCamelCase__ ) as cl:
UpperCamelCase = text_generator(UpperCamelCase__ , max_new_tokens=1 )
self.assertNotIn(UpperCamelCase__ , cl.out )
with CaptureLogger(UpperCamelCase__ ) as cl:
UpperCamelCase = text_generator(UpperCamelCase__ , max_length=1_0 )
self.assertNotIn(UpperCamelCase__ , cl.out )
| 249 | 0 |
import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4)) | 95 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 225 | 0 |
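A minimal sketch of the lazy-import mechanism behind `_LazyModule` (a simplified stand-in, not the real implementation in `transformers.utils`): the module object maps exported names to submodules and only imports a submodule the first time one of its attributes is accessed.

import importlib
from types import ModuleType


class LazyModuleSketch(ModuleType):
    """Defers importing submodules until one of their attributes is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"configuration_layoutlmv2": ["LayoutLMv2Config"], ...}
        self._class_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)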
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 264 |
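The defaults above imply the standard MAE patch arithmetic; a quick worked check in plain Python (not library code):

image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patches
num_masked = int(mask_ratio * num_patches)     # 147 patches are masked out
num_visible = num_patches - num_masked         # the encoder only sees 49
assert (num_patches, num_masked, num_visible) == (196, 147, 49)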
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 | 1 |
"""simple docstring"""
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks
    and Collin Burns
    and Saurav Kadavath
    and Akul Arora
    and Steven Basart
    and Eric Tang
    and Dawn Song
    and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        } | 86 |
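A hedged usage sketch of the canonicalization the description mentions (the exact rules are defined by the hendrycks/math package, assumed installed):

import math_equivalence

# "1/2" is canonicalized to "\\frac{1}{2}" before the string comparison,
# matching the doctest in the metric description above.
assert math_equivalence.is_equiv("1/2", "\\frac{1}{2}")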
"""simple docstring"""
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (dynamic programming)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 86 | 1 |
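Quick sanity checks for the function above; Lagrange's four-square theorem guarantees the answer never exceeds 4:

for n, expected in [(25, 1), (13, 2), (12, 3), (7, 4)]:
    assert minimum_squares_to_represent_a_number(n) == expected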
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 354 |
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale | 34 | 0 |
'''simple docstring'''
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into the given number of partitions."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 120 |
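A worked example of the allocator above; the final partition absorbs the remainder:

assert allocation_num(100, 3) == ["1-33", "34-66", "67-100"]
assert allocation_num(16, 4) == ["1-4", "5-8", "9-12", "13-16"]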
'''simple docstring'''
from sklearn.metrics import recall_score

import datasets


_DESCRIPTION = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {'recall': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {'recall': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {'recall': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {'recall': array([1., 0., 0.])}\n"
_CITATION = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 120 | 1 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
| 271 |
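A short usage sketch (assumes network access; `facebook/rag-token-base` is one published RAG checkpoint):

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
# __call__ routes through the question encoder tokenizer by default ...
inputs = tokenizer("who wrote hamlet?", return_tensors="pt")
# ... while decoding always goes through the generator tokenizer, e.g.:
# answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)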
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler

from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
    Adafactor,
    AdamW,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available


if is_fairscale_available():
    from fairscale.optim import OSS


logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 271 | 1 |
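The `label_smoothed_nll_loss` imported from `utils` above is not shown in this file; below is a minimal sketch consistent with how it is called here, signature `(log_probs, labels, epsilon, ignore_index) -> (loss, nll_loss)`, where `ignore_index` is the pad token id (a valid vocabulary index). The exact helper in the example repo may differ:

import torch


def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    """Fairseq-style label-smoothed negative log likelihood (sketch)."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    else:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    # blend the true-label NLL with the uniform "smoothing" term
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss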
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 12 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 0 |
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: sort items by value/weight ratio and fill capacity w."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 371 |
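A worked check with the classic instance: values 60, 100, 120 with weights 10, 20, 30 and capacity 50 give 60 + 100 + (20/30) * 120 = 240:

assert frac_knapsack([60, 100, 120], [10, 20, 30], w=50, n=3) == 240.0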
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 141 | 0 |
"""simple docstring"""
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 136 |
"""simple docstring"""
def solution(n: int = 1_000_000) -> int:
    """Returns the number below `n` that produces the longest Collatz chain (Project Euler 14)."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
| 136 | 1 |
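A small check of the function above (the known Project Euler 14 answer, solution(1_000_000) == 837799, is slow to recompute; below 10 the longest chain starts at 9, with 20 terms):

assert solution(10) == 9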
import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Returns the nth prime number (Project Euler 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
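
# Quick checks: the 6th prime is 13, and solution() with the default nth=10_001
# returns 104743, the known Project Euler 7 answer.
assert solution(6) == 13
assert is_prime(104743)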
if __name__ == "__main__":
    print(f"{solution() = }") | 284 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Find the median of the sorted merge of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}") | 284 | 1 |
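Worked examples for the function above:

assert median_of_two_arrays([1, 3], [2]) == 2  # odd total length: the middle element
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5  # even: mean of the two middle elements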
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph where each pair of nodes is connected with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, too
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 9 |
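A quick check of the complete_graph fallback used when probability >= 1:

assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}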
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 114 | 0 |
'''simple docstring'''
import numpy as np


class Cell:
    """A cell in the grid world, with a position, a parent, and the costs g, h and f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """The external world, represented as an M*M grid."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the neighbours of a cell (8-connected)."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from start to goal; f = g + h with a squared-Euclidean heuristic."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 274 |
'''simple docstring'''
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """The smallest number that can be generated is zero; the largest is modulo - 1."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
| 274 | 1 |
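The demo loop above never terminates; run separately, the recurrence X_{n+1} = (a * X_n + c) mod m is fully deterministic for a fixed seed:

lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, 0)
assert lcg.next_number() == (1664525 * 0 + 1013904223) % (2 << 31)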
def heaps(arr: list) -> list:
    """Pure-Python implementation of Heap's algorithm; returns all permutations of a list."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
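
# A worked check against itertools.permutations: Heap's algorithm yields each
# of the n! orderings exactly once.
from itertools import permutations

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))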
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 129 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__a : List[Any] = logging.getLogger(__name__)
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=lowercase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=lowercase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=lowercase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=lowercase , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=lowercase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=lowercase , type=lowercase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=lowercase , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=lowercase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
__lowercase = parser.parse_args()
return args
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
def fn(lowercase ):
return tokenizer(examples['''text'''] )
return fn
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
__lowercase = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
__lowercase = tf.train.Features(feature=lowercase )
__lowercase = tf.train.Example(features=lowercase )
__lowercase = example.SerializeToString()
records.append(lowercase )
return records
def main(args):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['''text'''] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['''input_ids'''] )
        filename = os.path.join(split_dir , F"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('''Wrote file {} containing {} records'''.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"split-{args.split}-records-count.txt" , '''w''' ) as f:
        print(F"Total {args.split} records: {total_records}" , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args) | 210 | 0 |
"""simple docstring"""
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph, source_vertex) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path(self, target_vertex) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f'->{target_vertex}'
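# A small sketch (an addition, not part of the class above): the same path can
# be rebuilt iteratively from the parent map, which avoids recursion-depth
# limits on very long paths.
def iterative_shortest_path(bfs, target_vertex):
    path = [target_vertex]
    while path[-1] != bfs.source_vertex:
        parent = bfs.parent.get(path[-1])
        if parent is None:
            raise ValueError(f'No path from vertex: {bfs.source_vertex} to vertex: {target_vertex}')
        path.append(parent)
    return "->".join(reversed(path))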
if __name__ == "__main__":
    g = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 361 | """simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            UpperCAmelCase(depth + 1 , node_index * 2 , False , scores , height ) , UpperCAmelCase(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        UpperCAmelCase(depth + 1 , node_index * 2 , True , scores , height ) , UpperCAmelCase(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
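# Worked example (a hand trace, added for clarity): for the scores
# [90, 23, 6, 33, 21, 65, 123, 34423] the tree has height log2(8) = 3.
# Depth 2 (max of leaf pairs): [90, 33, 65, 34423]; depth 1 (min): [33, 65];
# depth 0 (max): 65 — the optimal value printed by main() below.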
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print('Optimal value : ' , end='' )
    print(UpperCAmelCase(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 312 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
random.seed(__lowercase )
np.random.seed(__lowercase )
torch.manual_seed(__lowercase )
torch.cuda.manual_seed_all(__lowercase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase = 0.9999 , lowercase = 0.0 , lowercase = 0 , lowercase = False , lowercase = 1.0 , lowercase = 2 / 3 , lowercase = None , lowercase = None , **lowercase , ):
"""simple docstring"""
if isinstance(lowercase , torch.nn.Module ):
A_ : Tuple = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , lowercase , standard_warn=lowercase , )
A_ : Optional[Any] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A_ : str = True
if kwargs.get('max_value' , lowercase ) is not None:
A_ : int = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , lowercase , standard_warn=lowercase )
A_ : List[str] = kwargs['max_value']
if kwargs.get('min_value' , lowercase ) is not None:
A_ : Dict = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , lowercase , standard_warn=lowercase )
A_ : Optional[int] = kwargs['min_value']
A_ : str = list(lowercase )
A_ : Optional[int] = [p.clone().detach() for p in parameters]
if kwargs.get('device' , lowercase ) is not None:
A_ : Dict = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , lowercase , standard_warn=lowercase )
self.to(device=kwargs['device'] )
A_ : Union[str, Any] = None
A_ : Dict = decay
A_ : Union[str, Any] = min_decay
A_ : Optional[Any] = update_after_step
A_ : Optional[Any] = use_ema_warmup
A_ : Tuple = inv_gamma
A_ : Optional[int] = power
A_ : Optional[int] = 0
A_ : Optional[Any] = None # set in `step()`
A_ : Optional[Any] = model_cls
A_ : List[str] = model_config
@classmethod
def lowerCAmelCase_ ( cls , lowercase , lowercase ):
"""simple docstring"""
A_ , A_ : List[Any] = model_cls.load_config(lowercase , return_unused_kwargs=lowercase )
A_ : List[Any] = model_cls.from_pretrained(lowercase )
A_ : Union[str, Any] = cls(model.parameters() , model_cls=lowercase , model_config=model.config )
ema_model.load_state_dict(lowercase )
return ema_model
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
A_ : Optional[Any] = self.model_cls.from_config(self.model_config )
A_ : Any = self.state_dict()
state_dict.pop('shadow_params' , lowercase )
model.register_to_config(**lowercase )
self.copy_to(model.parameters() )
model.save_pretrained(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
A_ : Dict = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
A_ : Optional[int] = (1 + step) / (1_0 + step)
A_ : Tuple = min(lowercase , self.decay )
# make sure decay is not smaller than min_decay
A_ : List[Any] = max(lowercase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if isinstance(lowercase , torch.nn.Module ):
A_ : Dict = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , lowercase , standard_warn=lowercase , )
A_ : Any = parameters.parameters()
A_ : Tuple = list(lowercase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A_ : List[Any] = self.get_decay(self.optimization_step )
A_ : Optional[Any] = decay
A_ : Optional[Any] = 1 - decay
A_ : Tuple = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowercase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A_ : List[Any] = deepspeed.zero.GatheredParameters(lowercase , modifier_rank=lowercase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = list(lowercase )
for s_param, param in zip(self.shadow_params , lowercase ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase_ ( self , lowercase=None , lowercase=None ):
"""simple docstring"""
A_ : str = [
p.to(device=lowercase , dtype=lowercase ) if p.is_floating_point() else p.to(device=lowercase )
for p in self.shadow_params
]
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params , lowercase ):
param.data.copy_(c_param.data )
# Better memory-wise.
A_ : List[str] = None
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = copy.deepcopy(lowercase )
A_ : Dict = state_dict.get('decay' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
A_ : List[Any] = state_dict.get('min_decay' , self.min_decay )
if not isinstance(self.min_decay , lowercase ):
raise ValueError('Invalid min_decay' )
A_ : str = state_dict.get('optimization_step' , self.optimization_step )
if not isinstance(self.optimization_step , lowercase ):
raise ValueError('Invalid optimization_step' )
A_ : Dict = state_dict.get('update_after_step' , self.update_after_step )
if not isinstance(self.update_after_step , lowercase ):
raise ValueError('Invalid update_after_step' )
A_ : str = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowercase ):
raise ValueError('Invalid use_ema_warmup' )
A_ : List[Any] = state_dict.get('inv_gamma' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('Invalid inv_gamma' )
A_ : List[Any] = state_dict.get('power' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('Invalid power' )
A_ : Union[str, Any] = state_dict.get('shadow_params' , lowercase )
if shadow_params is not None:
A_ : Tuple = shadow_params
if not isinstance(self.shadow_params , lowercase ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(lowercase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
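# A standalone sketch (an illustration, not part of the class above) of the
# warmup branch of `get_decay`: decay = 1 - (1 + step / inv_gamma) ** -power,
# clamped to [min_decay, decay].
def ema_decay_at(step, decay=0.9999, min_decay=0.0, inv_gamma=1.0, power=2 / 3):
    if step <= 0:
        return 0.0
    cur = 1 - (1 + step / inv_gamma) ** -power
    return max(min(cur, decay), min_decay)
# e.g. ema_decay_at(10) ≈ 0.798 and ema_decay_at(1000) ≈ 0.990, so the moving
# average weights recent checkpoints more heavily early in training.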
| 140 | import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_UpperCAmelCase = """scheduler_config.json"""
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = 5
lowerCamelCase_ = 6
lowerCamelCase_ = 7
lowerCamelCase_ = 8
lowerCamelCase_ = 9
lowerCamelCase_ = 1_0
lowerCamelCase_ = 1_1
lowerCamelCase_ = 1_2
lowerCamelCase_ = 1_3
lowerCamelCase_ = 1_4
@dataclass
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 42
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = SCHEDULER_CONFIG_NAME
lowerCamelCase_ = []
lowerCamelCase_ = True
@classmethod
def lowerCAmelCase_ ( cls , lowercase = None , lowercase = None , lowercase=False , **lowercase , ):
"""simple docstring"""
A_ , A_ , A_ : int = cls.load_config(
pretrained_model_name_or_path=lowercase , subfolder=lowercase , return_unused_kwargs=lowercase , return_commit_hash=lowercase , **lowercase , )
return cls.from_config(lowercase , return_unused_kwargs=lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , **lowercase ):
"""simple docstring"""
self.save_config(save_directory=lowercase , push_to_hub=lowercase , **lowercase )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def lowerCAmelCase_ ( cls ):
"""simple docstring"""
A_ : Optional[Any] = list(set([cls.__name__] + cls._compatibles ) )
A_ : Any = importlib.import_module(__name__.split('.' )[0] )
A_ : Tuple = [
getattr(lowercase , lowercase ) for c in compatible_classes_str if hasattr(lowercase , lowercase )
]
return compatible_classes
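# A hypothetical usage sketch of the mixin above (the scheduler class and the
# checkpoint id are placeholders, not taken from this file):
# from diffusers import DDPMScheduler
# scheduler = DDPMScheduler.from_pretrained("some/checkpoint", subfolder="scheduler")
# scheduler.save_pretrained("./my-scheduler")  # writes scheduler_config.json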
| 140 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : List[str] ):
"""simple docstring"""
_snake_case : Optional[Any] = original_name.split(""".""" )[0]
_snake_case : List[str] = key.split(""".""" )
_snake_case : List[Any] = int(key_list[key_list.index(snake_case__ ) - 2] )
_snake_case : Optional[int] = int(key_list[key_list.index(snake_case__ ) - 1] )
_snake_case : Union[str, Any] = orig_block_num - offset
_snake_case : Union[str, Any] = key.replace(F"{orig_block_num}.{layer_num}.{original_name}" , F"block.{new_block_num}.{layer_num}.{new_name}" )
return key
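# Worked example (a hand trace of the helper above): for
# key = "network.1.0.mlp.fc1.weight", original_name = "mlp.fc1",
# new_name = "output.conv1" and offset = 0, key_list is
# ["network", "1", "0", "mlp", "fc1", "weight"], so orig_block_num = 1 and
# layer_num = 0, giving "network.block.1.0.output.conv1.weight".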
def UpperCAmelCase__ (snake_case__ : Dict ):
"""simple docstring"""
_snake_case : Optional[int] = OrderedDict()
_snake_case , _snake_case : Optional[int] = 0, 0
for key, value in state_dict.items():
if key.startswith("""network""" ):
_snake_case : Any = key.replace("""network""" , """poolformer.encoder""" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""" ) and "patch_embed" not in key:
patch_emb_offset += 1
_snake_case : List[str] = key[: key.find("""proj""" )]
_snake_case : List[str] = key.replace(snake_case__ , F"patch_embeddings.{total_embed_found}." )
_snake_case : Optional[int] = key.replace("""proj""" , """projection""" )
if key.endswith("""bias""" ):
total_embed_found += 1
if "patch_embeddings" in key:
_snake_case : List[str] = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
_snake_case : Optional[int] = replace_key_with_offset(snake_case__ , snake_case__ , """mlp.fc1""" , """output.conv1""" )
if "mlp.fc2" in key:
_snake_case : Optional[int] = replace_key_with_offset(snake_case__ , snake_case__ , """mlp.fc2""" , """output.conv2""" )
if "norm1" in key:
_snake_case : Tuple = replace_key_with_offset(snake_case__ , snake_case__ , """norm1""" , """before_norm""" )
if "norm2" in key:
_snake_case : Any = replace_key_with_offset(snake_case__ , snake_case__ , """norm2""" , """after_norm""" )
if "layer_scale_1" in key:
_snake_case : Tuple = replace_key_with_offset(snake_case__ , snake_case__ , """layer_scale_1""" , """layer_scale_1""" )
if "layer_scale_2" in key:
_snake_case : Union[str, Any] = replace_key_with_offset(snake_case__ , snake_case__ , """layer_scale_2""" , """layer_scale_2""" )
if "head" in key:
_snake_case : List[str] = key.replace("""head""" , """classifier""" )
_snake_case : Any = value
return new_state_dict
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : int = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return image
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : int = PoolFormerConfig()
# set attributes based on model_name
_snake_case : Dict = """huggingface/label-files"""
_snake_case : List[str] = model_name[-3:]
_snake_case : List[str] = 10_00
_snake_case : Optional[int] = """imagenet-1k-id2label.json"""
_snake_case : List[Any] = (1, 10_00)
# set config attributes
_snake_case : int = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : str = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : Dict = idalabel
_snake_case : int = {v: k for k, v in idalabel.items()}
if size == "s12":
_snake_case : Optional[int] = [2, 2, 6, 2]
_snake_case : Optional[Any] = [64, 1_28, 3_20, 5_12]
_snake_case : Union[str, Any] = 4.0
_snake_case : Dict = 0.9
elif size == "s24":
_snake_case : Dict = [4, 4, 12, 4]
_snake_case : Dict = [64, 1_28, 3_20, 5_12]
_snake_case : int = 4.0
_snake_case : Dict = 0.9
elif size == "s36":
_snake_case : Dict = [6, 6, 18, 6]
_snake_case : Dict = [64, 1_28, 3_20, 5_12]
_snake_case : int = 4.0
_snake_case : List[str] = 1e-6
_snake_case : Optional[Any] = 0.9
elif size == "m36":
_snake_case : Dict = [6, 6, 18, 6]
_snake_case : Dict = [96, 1_92, 3_84, 7_68]
_snake_case : List[str] = 4.0
_snake_case : int = 1e-6
_snake_case : int = 0.95
elif size == "m48":
_snake_case : Optional[int] = [8, 8, 24, 8]
_snake_case : List[Any] = [96, 1_92, 3_84, 7_68]
_snake_case : List[str] = 4.0
_snake_case : Optional[Any] = 1e-6
_snake_case : Any = 0.95
else:
raise ValueError(F"Size {size} not supported" )
# load image processor
_snake_case : Union[str, Any] = PoolFormerImageProcessor(crop_pct=snake_case__ )
# Prepare image
_snake_case : Optional[Any] = prepare_img()
_snake_case : List[str] = image_processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
_snake_case : Tuple = torch.load(snake_case__ , map_location=torch.device("""cpu""" ) )
# rename keys
_snake_case : Tuple = rename_keys(snake_case__ )
# create HuggingFace model and load state dict
_snake_case : int = PoolFormerForImageClassification(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Define image processor
_snake_case : str = PoolFormerImageProcessor(crop_pct=snake_case__ )
_snake_case : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values
# forward pass
_snake_case : Optional[Any] = model(snake_case__ )
_snake_case : int = outputs.logits
# define expected logit slices for different models
if size == "s12":
_snake_case : Any = torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
elif size == "s24":
_snake_case : Union[str, Any] = torch.tensor([0.44_02, -0.13_74, -0.80_45] )
elif size == "s36":
_snake_case : Union[str, Any] = torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
elif size == "m36":
_snake_case : str = torch.tensor([0.39_52, 0.22_63, -1.26_68] )
elif size == "m48":
_snake_case : Optional[int] = torch.tensor([0.11_67, -0.06_56, -0.34_23] )
else:
raise ValueError(F"Size {size} not supported" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
A_ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
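# Example invocation (a sketch; the checkpoint path is a placeholder):
# python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
#     --checkpoint_path ./poolformer_s12.pth --pytorch_dump_folder_path ./poolformer-s12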
| 132 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : int=False ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
_snake_case : Dict = os.path.abspath(snake_case__ )
logger.info(F"Loading PyTorch weights from {pt_path}" )
_snake_case : Tuple = torch.load(snake_case__ , map_location="""cpu""" )
logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
_snake_case : int = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
_snake_case : Dict = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ )
return flax_state_dict
def UpperCAmelCase__ (snake_case__ : Tuple[str] , snake_case__ : np.ndarray , snake_case__ : Dict[str, jnp.ndarray] , snake_case__ : str , ):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(snake_case__ : Tuple[str] ) -> bool:
return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
_snake_case : Any = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
_snake_case : Optional[Any] = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
_snake_case : Any = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
_snake_case : Any = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
_snake_case : Optional[int] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ):
_snake_case : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_snake_case : List[str] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ):
_snake_case : List[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_snake_case : List[Any] = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_snake_case : Tuple = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
_snake_case : Optional[Any] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
_snake_case : Union[str, Any] = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
_snake_case : Dict = pt_tuple_key[-2] + """_v"""
if name is not None:
_snake_case : Union[str, Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
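# A small numpy sketch (illustration only) of the conv-kernel rule applied
# above: PyTorch stores conv weights as (out_ch, in_ch, kh, kw), while Flax
# expects (kh, kw, in_ch, out_ch), hence the transpose(2, 3, 1, 0).
# Example: np.zeros((8, 3, 5, 5)).transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)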
def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
_snake_case : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
_snake_case : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_snake_case : Dict = flax_model.params["""params"""]
else:
_snake_case : List[Any] = flax_model.params
_snake_case : Tuple = flatten_dict(snake_case__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_snake_case : Union[str, Any] = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(snake_case__ )
_snake_case : Tuple = {}
_snake_case : Dict = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_snake_case : Optional[int] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_snake_case : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_snake_case : Optional[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_snake_case : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
_snake_case , _snake_case : int = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
_snake_case : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_snake_case : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_snake_case : Union[str, Any] = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
_snake_case : List[Any] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
_snake_case : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
import torch
# Load the index
_snake_case : str = {}
for shard_file in shard_filenames:
# load using msgpack utils
_snake_case : Union[str, Any] = torch.load(snake_case__ )
_snake_case : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
_snake_case : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_snake_case : str = flax_model.params["""params"""]
_snake_case : List[Any] = flatten_dict(snake_case__ )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
_snake_case : List[Any] = flax_model.params
_snake_case : Tuple = flatten_dict(snake_case__ )
_snake_case : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_snake_case : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_snake_case : List[str] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_snake_case : str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_snake_case : Optional[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
_snake_case , _snake_case : Optional[Any] = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
_snake_case : List[str] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_snake_case : Any = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_snake_case : Optional[int] = jnp.asarray(snake_case__ )
continue
if "var" in flax_key[-1]:
_snake_case : Any = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
_snake_case : List[str] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
_snake_case : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : Optional[Any] = os.path.abspath(snake_case__ )
logger.info(F"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
_snake_case : Union[str, Any] = getattr(snake_case__ , """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(snake_case__ , """rb""" ) as state_f:
try:
_snake_case : Dict = from_bytes(snake_case__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Optional[int] ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
_snake_case : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
_snake_case : Optional[int] = jax.tree_util.tree_map(
lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ )
_snake_case : Dict = flatten_dict(snake_case__ )
_snake_case : Optional[Any] = pt_model.state_dict()
_snake_case : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
_snake_case : Optional[int] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_snake_case : str = []
_snake_case : Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_snake_case : Tuple = flax_key_tuple[0] == pt_model.base_model_prefix
_snake_case : Optional[Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_snake_case : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_snake_case : Union[str, Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict:
# conv layer
_snake_case : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
_snake_case : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict:
# linear layer
_snake_case : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
_snake_case : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_snake_case : int = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_snake_case : Tuple = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
_snake_case : Optional[int] = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
_snake_case : int = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_snake_case : int = """.""".join(snake_case__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_snake_case : Optional[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_snake_case : List[str] = key.split(""".""" )
_snake_case : Optional[int] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_snake_case : int = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
_snake_case : Union[str, Any] = key_components[-2] + """_v"""
if name is not None:
_snake_case : Dict = key_components[:-3] + [name]
_snake_case : Dict = """.""".join(snake_case__ )
_snake_case : str = key
if flax_key in special_pt_names:
_snake_case : Union[str, Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
_snake_case : List[str] = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
_snake_case : List[Any] = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
_snake_case : List[str] = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(snake_case__ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
else:
logger.warning(
F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"""If your task is similar to the task the model of the checkpoint was trained on, """
F"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
| 132 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
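# A minimal generic sketch (an illustration, not the transformers
# implementation of _LazyModule) of the lazy-import pattern wired up above:
# attribute access triggers the real submodule import.
class _LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        import importlib
        module = importlib.import_module("." + self._attr_to_module[attr], self._name)
        return getattr(module, attr)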
| 59 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] ) -> List[str]:
'''simple docstring'''
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(snake_case__ ) for s in shape] )}.npy"""
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[Any]=0 , snake_case__ : Any=(4, 4, 64, 64) , snake_case__ : List[Any]=False ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
snake_case : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ )
return image
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple=False , snake_case__ : List[Any]="CompVis/stable-diffusion-v1-4" ) -> List[Any]:
'''simple docstring'''
snake_case : List[str] = jnp.bfloataa if fpaa else jnp.floataa
snake_case : str = "bf16" if fpaa else None
snake_case , snake_case : Optional[int] = FlaxUNetaDConditionModel.from_pretrained(
snake_case__ , subfolder="unet" , dtype=snake_case__ , revision=snake_case__ )
return model, params
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=(4, 77, 7_68) , snake_case__ : Dict=False ) -> List[str]:
'''simple docstring'''
snake_case : Any = jnp.bfloataa if fpaa else jnp.floataa
snake_case : Any = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Dict ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : List[str] = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=snake_case__ )
snake_case : Union[str, Any] = self.get_latents(snake_case__ , fpaa=snake_case__ )
snake_case : List[str] = self.get_encoder_hidden_states(snake_case__ , fpaa=snake_case__ )
snake_case : Dict = model.apply(
{"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample
assert sample.shape == latents.shape
snake_case : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case : Optional[int] = jnp.array(snake_case__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(snake_case__ , snake_case__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Tuple ) -> str:
'''simple docstring'''
snake_case , snake_case : List[Any] = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=snake_case__ )
snake_case : List[str] = self.get_latents(snake_case__ , shape=(4, 4, 96, 96) , fpaa=snake_case__ )
snake_case : Union[str, Any] = self.get_encoder_hidden_states(snake_case__ , shape=(4, 77, 10_24) , fpaa=snake_case__ )
snake_case : Optional[int] = model.apply(
{"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample
assert sample.shape == latents.shape
snake_case : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case : Dict = jnp.array(snake_case__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(snake_case__ , snake_case__ , atol=1e-2 )
| 59 | 1 |
'''simple docstring'''
import math
def solution(n: int = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
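# Closed-form check (an addition): sum of squares = n(n+1)(2n+1)/6 and
# square of sum = (n(n+1)/2)**2, so the same value without loops:
def closed_form(n: int = 100) -> int:
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6
# Both give 25164150 for n = 100.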
if __name__ == "__main__":
print(F"""{solution() = }""") | 354 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
self.assertEquals(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) ) | 31 | 0 |
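# A small sketch (illustration only) of the invariant the tests above check via
# `_check_zero_mean_unit_variance`: after utterance-level normalization each
# feature dimension has approximately zero mean and unit variance.
# import numpy as np
# feats = np.random.rand(100, 24).astype(np.float32)
# normed = (feats - feats.mean(axis=0)) / np.sqrt(feats.var(axis=0) + 1e-7)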
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path ):
    """simple docstring"""
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
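# Note on the expected input (inferred from the parser above): a
# whitespace-separated edge list, one edge per line, e.g. "a b 20" meaning
# vertices a and b are 20 apart; each edge is recorded in both directions, so
# dict_of_neighbours["a"] ends up as [["b", "20"], ...].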
def generate_first_solution(path , dict_of_neighbours ):
    """simple docstring"""
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Tuple = []
for n in solution[1:-1]:
lowercase_ : List[str] = solution.index(__SCREAMING_SNAKE_CASE )
for kn in solution[1:-1]:
lowercase_ : Any = solution.index(__SCREAMING_SNAKE_CASE )
if n == kn:
continue
lowercase_ : Dict = copy.deepcopy(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = kn
lowercase_ : List[Any] = n
lowercase_ : str = 0
for k in _tmp[:-1]:
lowercase_ : Tuple = _tmp[_tmp.index(__SCREAMING_SNAKE_CASE ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase_ : Optional[Any] = distance + int(i[1] )
_tmp.append(__SCREAMING_SNAKE_CASE )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase_ : Union[str, Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __SCREAMING_SNAKE_CASE : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = 1
lowercase_ : List[str] = first_solution
lowercase_ : Dict = []
lowercase_ : List[str] = distance_of_first_solution
lowercase_ : Optional[Any] = solution
while count <= iters:
lowercase_ : int = find_neighborhood(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = 0
lowercase_ : Dict = neighborhood[index_of_best_solution]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) - 1
lowercase_ : Tuple = False
while not found:
lowercase_ : Optional[int] = 0
while i < len(__SCREAMING_SNAKE_CASE ):
if best_solution[i] != solution[i]:
lowercase_ : Tuple = best_solution[i]
lowercase_ : Optional[int] = solution[i]
break
lowercase_ : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase_ : Tuple = True
lowercase_ : Optional[int] = best_solution[:-1]
lowercase_ : Optional[Any] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase_ : Optional[Any] = cost
lowercase_ : int = solution
else:
lowercase_ : Any = index_of_best_solution + 1
lowercase_ : Any = neighborhood[index_of_best_solution]
if len(__SCREAMING_SNAKE_CASE ) >= size:
tabu_list.pop(0 )
lowercase_ : List[Any] = count + 1
return best_solution_ever, best_cost
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str]=None ):
"""simple docstring"""
lowercase_ : Any = generate_neighbours(args.File )
lowercase_ , lowercase_ : Union[str, Any] = generate_first_solution(
args.File , __SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Optional[int] = tabu_search(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 93 |
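The tabu-search script above is tied to a whitespace-separated edge file and argparse flags. A compact, self-contained sketch of the same search loop on a toy four-city tour (the distance table, move-signature scheme, and all names below are illustrative, not taken from the script):

```python
import itertools

DIST = {("a", "b"): 2, ("a", "c"): 5, ("a", "d"): 4, ("b", "c"): 3, ("b", "d"): 6, ("c", "d"): 1}

def dist(u, v):
    return DIST[(u, v)] if (u, v) in DIST else DIST[(v, u)]

def tour_cost(tour):
    return sum(dist(tour[i], tour[i + 1]) for i in range(len(tour) - 1))

def tabu_search_toy(start, iters=20, tabu_size=3):
    current, best = start[:], start[:]
    tabu = []
    for _ in range(iters):
        # neighborhood: swap two interior cities (the tour's endpoints stay fixed)
        scored = []
        for i, j in itertools.combinations(range(1, len(current) - 1), 2):
            cand = current[:]
            cand[i], cand[j] = cand[j], cand[i]
            scored.append((tour_cost(cand), frozenset((current[i], current[j])), cand))
        scored.sort(key=lambda t: t[0])
        for cost, move, cand in scored:
            # take the best non-tabu move; aspiration criterion: a tabu move
            # that beats the best-known tour is allowed anyway
            if move not in tabu or cost < tour_cost(best):
                current = cand
                tabu.append(move)
                if len(tabu) > tabu_size:
                    tabu.pop(0)
                break
        if tour_cost(current) < tour_cost(best):
            best = current[:]
    return best, tour_cost(best)

print(tabu_search_toy(["a", "b", "c", "d", "a"]))
```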
'''simple docstring'''
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
        super().__init__()
lowercase_ : Tuple = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-0_8 )
lowercase_ : Optional[Any] = torch.nn.Softmax(dim=1 )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = W_supports['''sizes'''].tolist()
lowercase_ : Dict = W_supports['''start_token_id'''].item()
lowercase_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase_ : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : str = None
lowercase_ : Dict = None
lowercase_ : Tuple = W_supports['''input_ids'''] == start_token_id
lowercase_ : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
lowercase_ : List[str] = 0
else:
lowercase_ : List[Any] = support_sizes[i - 1]
lowercase_ : str = S[s : s + size][start_token_masks[s : s + size]]
lowercase_ : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
lowercase_ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase_ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase_ : Tuple = torch.vstack((p_starts, p_start) )
lowercase_ : Optional[Any] = torch.vstack((p_ends, p_end) )
else:
lowercase_ : str = p_start
lowercase_ : int = p_end
return p_starts, p_ends
| 93 | 1 |
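The few-shot NER module above scores each query token against support-set start/end tokens with a temperature-scaled cosine softmax, `softmax(T * cos(q, S))`. A minimal standalone sketch of that scoring step, with illustrative shapes (the real module operates on 4-D tensors with `dim=3`):

```python
import torch

cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-8)
softmax = torch.nn.Softmax(dim=1)

def cosine_attention(q: torch.Tensor, s: torch.Tensor, temperature: float = 1.0) -> torch.Tensor:
    # q: (batch, 1, hidden) query embeddings, s: (batch, support, hidden) support embeddings
    scores = cos(q, s)                    # broadcasts to (batch, support)
    return softmax(temperature * scores)  # attention weights over the support examples

q = torch.randn(2, 1, 768)
s = torch.randn(2, 5, 768)
print(cosine_attention(q, s, temperature=4.0).shape)  # torch.Size([2, 5])
```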
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
_A = None
_A = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
_A = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def a__ ( lowerCAmelCase , lowerCAmelCase=1 , lowerCAmelCase=2_56 ) -> Union[str, Any]:
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def a__ ( lowerCAmelCase ) -> List[Any]:
with open(lowerCAmelCase , """r""" ) as f:
return json.load(lowerCAmelCase )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> List[str]:
with open(lowerCAmelCase , """w""" ) as f:
json.dump(lowerCAmelCase , lowerCAmelCase )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True ) -> Optional[int]:
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
UpperCAmelCase__ : List[Any] = os.path.join(lowerCAmelCase , """tmp""" )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
UpperCAmelCase__ : int = read_json(os.path.join(lowerCAmelCase , """params.json""" ) )
UpperCAmelCase__ : Union[str, Any] = NUM_SHARDS[model_size]
UpperCAmelCase__ : str = params["""n_layers"""]
UpperCAmelCase__ : Optional[Any] = params["""n_heads"""]
UpperCAmelCase__ : Optional[int] = n_heads // num_shards
UpperCAmelCase__ : int = params["""dim"""]
UpperCAmelCase__ : List[str] = dim // n_heads
UpperCAmelCase__ : List[str] = 1_0000.0
UpperCAmelCase__ : List[str] = 1.0 / (base ** (torch.arange(0 , lowerCAmelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
UpperCAmelCase__ : List[Any] = params["""n_kv_heads"""] # for GQA / MQA
UpperCAmelCase__ : Optional[int] = n_heads_per_shard // num_key_value_heads
UpperCAmelCase__ : str = dim // num_key_value_heads
else: # compatibility with other checkpoints
UpperCAmelCase__ : Union[str, Any] = n_heads
UpperCAmelCase__ : str = n_heads_per_shard
UpperCAmelCase__ : Optional[int] = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase__ : Any = torch.load(os.path.join(lowerCAmelCase , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
UpperCAmelCase__ : Dict = [
torch.load(os.path.join(lowerCAmelCase , F"""consolidated.{i:02d}.pth""" ) , map_location="""cpu""" )
for i in range(lowerCAmelCase )
]
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : Optional[int] = {"""weight_map""": {}}
for layer_i in range(lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase__ : List[Any] = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object; saving attention_norm and ffn_norm would therefore drag in the other weights
            # too, which is redundant since those weights are stitched together from multiple shards. To avoid that,
            # they are cloned.
UpperCAmelCase__ : int = {
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.attention_norm.weight"""
].clone(),
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
UpperCAmelCase__ : List[str] = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
for i in range(lowerCAmelCase )
] , dim=0 , ).reshape(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase__ : int = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
for i in range(lowerCAmelCase )
] , dim=0 , ).reshape(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
UpperCAmelCase__ : List[str] = torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
for i in range(lowerCAmelCase )
] , dim=0 , ).reshape(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = torch.cat(
[loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(lowerCAmelCase )] , dim=1 )
UpperCAmelCase__ : Union[str, Any] = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(lowerCAmelCase )] , dim=0 )
UpperCAmelCase__ : List[str] = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(lowerCAmelCase )] , dim=1 )
UpperCAmelCase__ : Tuple = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(lowerCAmelCase )] , dim=0 )
UpperCAmelCase__ : Dict = inv_freq
for k, v in state_dict.items():
UpperCAmelCase__ : Dict = filename
param_count += v.numel()
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase__ : Dict = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase__ : Union[str, Any] = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
UpperCAmelCase__ : str = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(lowerCAmelCase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(lowerCAmelCase )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase__ : Optional[Any] = filename
param_count += v.numel()
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
# Write configs
UpperCAmelCase__ : str = {"""total_size""": param_count * 2}
write_json(lowerCAmelCase , os.path.join(lowerCAmelCase , """pytorch_model.bin.index.json""" ) )
UpperCAmelCase__ : str = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
UpperCAmelCase__ : List[str] = params["""multiple_of"""] if """multiple_of""" in params else 2_56
UpperCAmelCase__ : int = LlamaConfig(
hidden_size=lowerCAmelCase , intermediate_size=compute_intermediate_size(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=lowerCAmelCase , )
config.save_pretrained(lowerCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
    UpperCAmelCase__ : int = LlamaForCausalLM.from_pretrained(lowerCAmelCase , torch_dtype=torch.float16 , low_cpu_mem_usage=lowerCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(lowerCAmelCase , safe_serialization=lowerCAmelCase )
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
# Initialize the tokenizer based on the `spm` model
UpperCAmelCase__ : Dict = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
UpperCAmelCase__ : Dict = tokenizer_class(lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
def a__ ( ) -> Tuple:
UpperCAmelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=lowerCAmelCase , help="""Whether or not to save using `safetensors`.""" )
UpperCAmelCase__ : Dict = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
UpperCAmelCase__ : List[Any] = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , lowerCAmelCase )
if __name__ == "__main__":
main()
| 166 |
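The `permute` helper in the converter above exists because Meta's checkpoints interleave the two rotary halves within each attention head, whereas the Hugging Face LLaMA implementation expects them as two contiguous blocks per head. A standalone version of the same reshape, with a toy tensor to make the reordering visible:

```python
import torch

def permute_for_sliced_rotary(w: torch.Tensor, n_heads: int) -> torch.Tensor:
    dim1, dim2 = w.shape
    return (
        w.view(n_heads, dim1 // n_heads // 2, 2, dim2)  # split each head into (pairs, 2)
        .transpose(1, 2)                                # move the interleave axis out front
        .reshape(dim1, dim2)                            # flatten back to the original shape
    )

w = torch.arange(16.0).reshape(8, 2)  # toy wq: dim=8, n_heads=2, head_dim=4
print(permute_for_sliced_rotary(w, n_heads=2))
```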
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__(self , _lowerCamelCase = 65536 , _lowerCamelCase = None , _lowerCamelCase = 2 , _lowerCamelCase = 2 , _lowerCamelCase = 0 , _lowerCamelCase = "fourier" , _lowerCamelCase = True , _lowerCamelCase = False , _lowerCamelCase = 0.0 , _lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _lowerCamelCase = "UNetMidBlock1D" , _lowerCamelCase = None , _lowerCamelCase = (32, 32, 64) , _lowerCamelCase = None , _lowerCamelCase = 8 , _lowerCamelCase = 1 , _lowerCamelCase = False , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase__ : str = sample_size
# time
if time_embedding_type == "fourier":
UpperCAmelCase__ : Any = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_lowerCamelCase , log=_lowerCamelCase , flip_sin_to_cos=_lowerCamelCase )
UpperCAmelCase__ : Tuple = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCAmelCase__ : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_lowerCamelCase , downscale_freq_shift=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = block_out_channels[0]
if use_timestep_embedding:
UpperCAmelCase__ : Optional[Any] = block_out_channels[0] * 4
UpperCAmelCase__ : Any = TimestepEmbedding(
in_channels=_lowerCamelCase , time_embed_dim=_lowerCamelCase , act_fn=_lowerCamelCase , out_dim=block_out_channels[0] , )
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([] )
UpperCAmelCase__ : int = None
UpperCAmelCase__ : str = nn.ModuleList([] )
UpperCAmelCase__ : Optional[int] = None
# down
UpperCAmelCase__ : List[str] = in_channels
for i, down_block_type in enumerate(_lowerCamelCase ):
UpperCAmelCase__ : Optional[Any] = output_channel
UpperCAmelCase__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCAmelCase__ : Any = i == len(_lowerCamelCase ) - 1
UpperCAmelCase__ : Dict = get_down_block(
_lowerCamelCase , num_layers=_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_lowerCamelCase )
# mid
UpperCAmelCase__ : Optional[Any] = get_mid_block(
_lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_lowerCamelCase , add_downsample=_lowerCamelCase , )
# up
UpperCAmelCase__ : Tuple = list(reversed(_lowerCamelCase ) )
UpperCAmelCase__ : List[str] = reversed_block_out_channels[0]
if out_block_type is None:
UpperCAmelCase__ : int = out_channels
else:
UpperCAmelCase__ : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_lowerCamelCase ):
UpperCAmelCase__ : Any = output_channel
UpperCAmelCase__ : Dict = (
reversed_block_out_channels[i + 1] if i < len(_lowerCamelCase ) - 1 else final_upsample_channels
)
UpperCAmelCase__ : Union[str, Any] = i == len(_lowerCamelCase ) - 1
UpperCAmelCase__ : Optional[int] = get_up_block(
_lowerCamelCase , num_layers=_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_lowerCamelCase )
UpperCAmelCase__ : Dict = output_channel
# out
UpperCAmelCase__ : Optional[int] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
UpperCAmelCase__ : int = get_out_block(
out_block_type=_lowerCamelCase , num_groups_out=_lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=_lowerCamelCase , act_fn=_lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = timestep
if not torch.is_tensor(_lowerCamelCase ):
UpperCAmelCase__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_lowerCamelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase__ : List[str] = timesteps[None].to(sample.device )
UpperCAmelCase__ : Optional[Any] = self.time_proj(_lowerCamelCase )
if self.config.use_timestep_embedding:
UpperCAmelCase__ : Dict = self.time_mlp(_lowerCamelCase )
else:
UpperCAmelCase__ : int = timestep_embed[..., None]
UpperCAmelCase__ : List[str] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
UpperCAmelCase__ : Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
UpperCAmelCase__ : Optional[Any] = ()
for downsample_block in self.down_blocks:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = downsample_block(hidden_states=_lowerCamelCase , temb=_lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCAmelCase__ : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
UpperCAmelCase__ : int = down_block_res_samples[-1:]
UpperCAmelCase__ : Dict = down_block_res_samples[:-1]
UpperCAmelCase__ : str = upsample_block(_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , temb=_lowerCamelCase )
# 5. post-process
if self.out_block:
UpperCAmelCase__ : str = self.out_block(_lowerCamelCase , _lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_lowerCamelCase )
| 166 | 1 |
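The forward pass above first runs the scalar timestep through `time_proj` before (optionally) passing it through an MLP and broadcasting it along the sequence axis. For the `"positional"` branch that projection is a standard sinusoidal embedding; a minimal sketch of it, ignoring the `flip_sin_to_cos` and `downscale_freq_shift` options of the real block:

```python
import math
import torch

def sinusoidal_timestep_embedding(timesteps: torch.Tensor, dim: int,
                                  max_period: float = 10000.0) -> torch.Tensor:
    half = dim // 2
    # geometric ladder of frequencies from 1 down to 1/max_period
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps.float()[:, None] * freqs[None, :]
    return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)  # (batch, dim)

emb = sinusoidal_timestep_embedding(torch.tensor([0, 10, 999]), dim=32)
print(emb.shape)  # torch.Size([3, 32])
```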
"""simple docstring"""
UpperCAmelCase_ : Dict = range(2, 20 + 1)
UpperCAmelCase_ : Union[str, Any] = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def _A (__a , __a , __a , __a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = sum(a_i[j] for j in range(__a , len(__a ) ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(__a ) , __a ) ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0, 0
SCREAMING_SNAKE_CASE_ : str = n - i
SCREAMING_SNAKE_CASE_ : Dict = memo.get(__a )
if sub_memo is not None:
SCREAMING_SNAKE_CASE_ : str = sub_memo.get(__a )
if jumps is not None and len(__a ) > 0:
# find and make the largest jump without going over
SCREAMING_SNAKE_CASE_ : List[str] = -1
for _k in range(len(__a ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
SCREAMING_SNAKE_CASE_ : Optional[Any] = _k
break
if max_jump >= 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
SCREAMING_SNAKE_CASE_ : Optional[Any] = diff + c
for j in range(min(__a , len(__a ) ) ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = divmod(__a , 10 )
if new_c > 0:
add(__a , __a , __a )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
else:
SCREAMING_SNAKE_CASE_ : List[Any] = {c: []}
SCREAMING_SNAKE_CASE_ : Optional[int] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = next_term(__a , k - 1 , i + dn , __a )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = compute(__a , __a , i + dn , __a )
diff += _diff
dn += terms_jumped
SCREAMING_SNAKE_CASE_ : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while j < len(__a ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__a , (diff, dn, k) )
return (diff, dn)
def _A (__a , __a , __a , __a ) -> Optional[int]:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__a ):
a_i.extend([0 for _ in range(k - len(__a ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
SCREAMING_SNAKE_CASE_ : Any = i
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = 0, 0, 0
for j in range(len(__a ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
SCREAMING_SNAKE_CASE_ : Dict = ds_c + ds_b
diff += addend
SCREAMING_SNAKE_CASE_ : Tuple = 0
for j in range(__a ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = a_i[j] + addend
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = divmod(__a , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__a , __a , __a )
return diff, i - start_i
def _A (__a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
for j in range(__a , len(__a ) ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = digits[j] + addend
if s >= 10:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = divmod(__a , 10 )
SCREAMING_SNAKE_CASE_ : Optional[int] = addend // 10 + quotient
else:
SCREAMING_SNAKE_CASE_ : Tuple = s
SCREAMING_SNAKE_CASE_ : Tuple = addend // 10
if addend == 0:
break
while addend > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = divmod(__a , 10 )
digits.append(__a )
def _A (__a = 10**15 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [1]
SCREAMING_SNAKE_CASE_ : Dict = 1
SCREAMING_SNAKE_CASE_ : Dict = 0
while True:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = next_term(__a , 20 , i + dn , __a )
dn += terms_jumped
if dn == n - i:
break
SCREAMING_SNAKE_CASE_ : List[str] = 0
for j in range(len(__a ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 91 |
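The memoized solver above accelerates the sequence a_1 = 1, a_{n+1} = a_n + digitsum(a_n) by caching whole jumps of terms per digit-sum state. A naive reference implementation is handy for cross-checking it on small n:

```python
def naive_a(n: int) -> int:
    # direct evaluation of a_1 = 1, a_{k+1} = a_k + digit_sum(a_k)
    a = 1
    for _ in range(n - 1):
        a += sum(int(ch) for ch in str(a))
    return a

print([naive_a(k) for k in range(1, 8)])  # [1, 2, 4, 8, 16, 23, 28]
```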
"""simple docstring"""
def _A (__a = 50 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 91 | 1 |
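The tiling solution above appears to be Project Euler 116: count the ways to place at least one tile of a single fixed length (2, 3, or 4) on a row of black squares, summed over the three tile lengths. Assuming that reading of the problem, a cross-check via the simple linear recurrence f(n) = f(n-1) + f(n-tile):

```python
from functools import lru_cache

def ways_single_length(row: int, tile: int) -> int:
    @lru_cache(maxsize=None)
    def f(n: int) -> int:
        # ways to fill n cells with unit black squares and tiles of length `tile`
        if n < 0:
            return 0
        if n == 0:
            return 1
        return f(n - 1) + f(n - tile)
    return f(row) - 1  # drop the all-black arrangement (at least one tile is required)

print(sum(ways_single_length(5, t) for t in (2, 3, 4)))  # 12, matching the problem statement
```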
'''simple docstring'''
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCAmelCase : Dict =TypeVar('''T''')
class a_ ( Generic[T] ):
    def __init__( self : Any , directed : bool = True ):
"""simple docstring"""
lowercase_ :dict[T, list[T]] = {} # dictionary of lists
lowercase_ :List[Any] = directed
    def lowercase__ ( self : Any , source_vertex : T , destination_vertex : T ):
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCAmelCase_ )
self.adj_list[destination_vertex].append(UpperCAmelCase_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCAmelCase_ )
lowercase_ :Any = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(UpperCAmelCase_ )
lowercase_ :Tuple = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
lowercase_ :Tuple = [destination_vertex]
lowercase_ :Optional[Any] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCAmelCase_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCAmelCase_ )
lowercase_ :str = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowercase_ :Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
lowercase_ :Dict = [destination_vertex]
lowercase_ :Optional[Any] = []
return self
def __repr__( self : Dict ):
"""simple docstring"""
return pformat(self.adj_list )
| 365 |
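Since the class and method names in the listing above are mangled, here is a self-contained sketch of the same directed/undirected adjacency-list bookkeeping under plain, assumed names:

```python
from collections import defaultdict

def build_graph(edges, directed=False):
    adj = defaultdict(list)
    for u, v in edges:
        adj[u].append(v)
        adj[v]  # touch the destination so vertices with no out-edges still appear
        if not directed:
            adj[v].append(u)
    return dict(adj)

print(build_graph([(0, 1), (1, 2), (1, 3)]))
# {0: [1], 1: [0, 2, 3], 2: [1], 3: [1]}
print(build_graph([("a", "b"), ("b", "c")], directed=True))
# {'a': ['b'], 'b': ['c'], 'c': []}
```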
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Tuple =logging.get_logger(__name__)
lowerCAmelCase : str ={
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class a_ ( _lowerCAmelCase ):
__A = "cvt"
def __init__( self : Tuple , lowercase : str=3 , lowercase : str=[7, 3, 3] , lowercase : List[str]=[4, 2, 2] , lowercase : Dict=[2, 1, 1] , lowercase : int=[64, 192, 384] , lowercase : Dict=[1, 3, 6] , lowercase : Dict=[1, 2, 10] , lowercase : Any=[4.0, 4.0, 4.0] , lowercase : Tuple=[0.0, 0.0, 0.0] , lowercase : List[str]=[0.0, 0.0, 0.0] , lowercase : List[str]=[0.0, 0.0, 0.1] , lowercase : Any=[True, True, True] , lowercase : Any=[False, False, True] , lowercase : Optional[Any]=["dw_bn", "dw_bn", "dw_bn"] , lowercase : int=[3, 3, 3] , lowercase : str=[1, 1, 1] , lowercase : List[Any]=[2, 2, 2] , lowercase : Tuple=[1, 1, 1] , lowercase : Optional[Any]=[1, 1, 1] , lowercase : str=0.02 , lowercase : str=1e-1_2 , **lowercase : str , ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :List[Any] = num_channels
lowercase_ :Union[str, Any] = patch_sizes
lowercase_ :Tuple = patch_stride
lowercase_ :List[Any] = patch_padding
lowercase_ :List[Any] = embed_dim
lowercase_ :Union[str, Any] = num_heads
lowercase_ :Any = depth
lowercase_ :str = mlp_ratio
lowercase_ :List[str] = attention_drop_rate
lowercase_ :List[Any] = drop_rate
lowercase_ :Union[str, Any] = drop_path_rate
lowercase_ :Any = qkv_bias
lowercase_ :Dict = cls_token
lowercase_ :int = qkv_projection_method
lowercase_ :Union[str, Any] = kernel_qkv
lowercase_ :Optional[Any] = padding_kv
lowercase_ :Optional[Any] = stride_kv
lowercase_ :Dict = padding_q
lowercase_ :Any = stride_q
lowercase_ :Dict = initializer_range
lowercase_ :Optional[Any] = layer_norm_eps
| 147 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : jnp.ndarray
@flax_register_to_config
class SCREAMING_SNAKE_CASE__ ( nn.Module ,UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = 3_2
_UpperCAmelCase : int = 4
_UpperCAmelCase : int = 4
_UpperCAmelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCAmelCase : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_UpperCAmelCase : Union[bool, Tuple[bool]] = False
_UpperCAmelCase : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_UpperCAmelCase : int = 2
_UpperCAmelCase : Union[int, Tuple[int]] = 8
_UpperCAmelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCAmelCase : int = 1_2_8_0
_UpperCAmelCase : float = 0.0
_UpperCAmelCase : bool = False
_UpperCAmelCase : jnp.dtype = jnp.floataa
_UpperCAmelCase : bool = True
_UpperCAmelCase : int = 0
_UpperCAmelCase : bool = False
def A ( self : List[Any] , lowercase : jax.random.KeyArray ):
'''simple docstring'''
_snake_case = (1, self.in_channels, self.sample_size, self.sample_size)
_snake_case = jnp.zeros(lowercase , dtype=jnp.floataa )
_snake_case = jnp.ones((1,) , dtype=jnp.intaa )
_snake_case = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_snake_case , _snake_case = jax.random.split(lowercase )
_snake_case = {'params': params_rng, 'dropout': dropout_rng}
return self.init(lowercase , lowercase , lowercase , lowercase )["params"]
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.block_out_channels
_snake_case = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_snake_case = self.num_attention_heads or self.attention_head_dim
# input
_snake_case = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_snake_case = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_snake_case = FlaxTimestepEmbedding(lowercase , dtype=self.dtype )
_snake_case = self.only_cross_attention
if isinstance(lowercase , lowercase ):
_snake_case = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase , lowercase ):
_snake_case = (num_attention_heads,) * len(self.down_block_types )
# down
_snake_case = []
_snake_case = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
_snake_case = i == len(lowercase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_snake_case = FlaxCrossAttnDownBlockaD(
in_channels=lowercase , out_channels=lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_snake_case = FlaxDownBlockaD(
in_channels=lowercase , out_channels=lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowercase )
_snake_case = down_blocks
# mid
_snake_case = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_snake_case = []
_snake_case = list(reversed(lowercase ) )
_snake_case = list(reversed(lowercase ) )
_snake_case = list(reversed(lowercase ) )
_snake_case = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_snake_case = output_channel
_snake_case = reversed_block_out_channels[i]
_snake_case = reversed_block_out_channels[min(i + 1 , len(lowercase ) - 1 )]
_snake_case = i == len(lowercase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_snake_case = FlaxCrossAttnUpBlockaD(
in_channels=lowercase , out_channels=lowercase , prev_output_channel=lowercase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_snake_case = FlaxUpBlockaD(
in_channels=lowercase , out_channels=lowercase , prev_output_channel=lowercase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowercase )
_snake_case = output_channel
_snake_case = up_blocks
# out
_snake_case = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
_snake_case = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[Any] , lowercase : List[Any] , lowercase : str , lowercase : Any , lowercase : Any=None , lowercase : Tuple=None , lowercase : bool = True , lowercase : bool = False , ):
'''simple docstring'''
if not isinstance(lowercase , jnp.ndarray ):
_snake_case = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0:
_snake_case = timesteps.astype(dtype=jnp.floataa )
_snake_case = jnp.expand_dims(lowercase , 0 )
_snake_case = self.time_proj(lowercase )
_snake_case = self.time_embedding(lowercase )
# 2. pre-process
_snake_case = jnp.transpose(lowercase , (0, 2, 3, 1) )
_snake_case = self.conv_in(lowercase )
# 3. down
_snake_case = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase , lowercase ):
_snake_case , _snake_case = down_block(lowercase , lowercase , lowercase , deterministic=not train )
else:
_snake_case , _snake_case = down_block(lowercase , lowercase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_snake_case = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowercase , lowercase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_snake_case = new_down_block_res_samples
# 4. mid
_snake_case = self.mid_block(lowercase , lowercase , lowercase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_snake_case = down_block_res_samples[-(self.layers_per_block + 1) :]
_snake_case = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowercase , lowercase ):
_snake_case = up_block(
lowercase , temb=lowercase , encoder_hidden_states=lowercase , res_hidden_states_tuple=lowercase , deterministic=not train , )
else:
_snake_case = up_block(lowercase , temb=lowercase , res_hidden_states_tuple=lowercase , deterministic=not train )
# 6. post-process
_snake_case = self.conv_norm_out(lowercase )
_snake_case = nn.silu(lowercase )
_snake_case = self.conv_out(lowercase )
_snake_case = jnp.transpose(lowercase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowercase ) | 282 |
import numpy as np
def a_ ( vector : np.ndarray ) -> np.ndarray:
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
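The one-liner above overflows `np.exp` (with a runtime warning) for large-magnitude negative inputs, even though the limit value is correct. A numerically stable variant plus a quick check:

```python
import numpy as np

def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    out = np.empty_like(vector, dtype=np.float64)
    pos = vector >= 0
    # for x >= 0, exp(-x) cannot overflow
    out[pos] = 1.0 / (1.0 + np.exp(-vector[pos]))
    # for x < 0, rewrite as exp(x) / (1 + exp(x)) so the exponent stays negative
    exp_x = np.exp(vector[~pos])
    out[~pos] = exp_x / (1.0 + exp_x)
    return out

x = np.array([-1000.0, -1.0, 0.0, 1.0, 1000.0])
print(stable_sigmoid(x))  # [0.         0.26894142 0.5        0.73105858 1.        ]
```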
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = RoCBertTokenizer
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
def _UpperCamelCase ( self ):
super().setUp()
UpperCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
UpperCAmelCase = {}
UpperCAmelCase = {}
for i, value in enumerate(A ):
UpperCAmelCase = i
UpperCAmelCase = i
UpperCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""word_shape_file"""] )
UpperCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file ,"""w""" ,encoding="""utf-8""" ) as word_shape_writer:
json.dump(A ,A ,ensure_ascii=A )
with open(self.word_pronunciation_file ,"""w""" ,encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(A ,A ,ensure_ascii=A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file )
UpperCAmelCase = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(A ,["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A ) ,[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A ) ,[5, 6, 2, 5, 7, 8] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
UpperCAmelCase = {}
for i, token in enumerate(A ):
UpperCAmelCase = i
UpperCAmelCase = RoCBertWordpieceTokenizer(vocab=A ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def _UpperCamelCase ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def _UpperCamelCase ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def _UpperCamelCase ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
UpperCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
def _UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
UpperCAmelCase = tokenizer_r.encode_plus(
A ,return_attention_mask=A ,return_token_type_ids=A ,return_offsets_mapping=A ,add_special_tokens=A ,)
UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(A ,"""do_lower_case""" ) else False
UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""的""", """人""", """有"""]
UpperCAmelCase = """""".join(A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = True
UpperCAmelCase = self.tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = tokenizer_p.encode(A ,add_special_tokens=A )
UpperCAmelCase = tokenizer_r.encode(A ,add_special_tokens=A )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(A )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A ,A )
self.assertListEqual(A ,A )
UpperCAmelCase = False
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = self.tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = tokenizer_r.encode(A ,add_special_tokens=A )
UpperCAmelCase = tokenizer_p.encode(A ,add_special_tokens=A )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(A )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(A )
]
self.assertListEqual(A ,A )
self.assertListEqual(A ,A )
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file )
UpperCAmelCase = tokenizer.encode("""你好""" ,add_special_tokens=A )
UpperCAmelCase = tokenizer.encode("""你是谁""" ,add_special_tokens=A )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A ,A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_tokenizers(do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase = """你好,你是谁"""
UpperCAmelCase = tokenizer.tokenize(A )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(A )
UpperCAmelCase = tokenizer.convert_tokens_to_shape_ids(A )
UpperCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(A )
UpperCAmelCase = tokenizer.prepare_for_model(
A ,A ,A ,add_special_tokens=A )
UpperCAmelCase = tokenizer.encode_plus(A ,add_special_tokens=A )
self.assertEqual(A ,A )
| 234 |
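The wordpiece test above pins down greedy longest-match behaviour: `unwanted` splits into `un`, `##want`, `##ed`, and any word with an unmatchable remainder collapses to `[UNK]`. A minimal sketch of that matching rule (the function name and signature are illustrative):

```python
def wordpiece_tokenize(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1  # shrink the candidate span until it matches
        if cur is None:
            return [unk]  # any unmatchable span makes the whole word [UNK]
        tokens.append(cur)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece_tokenize("unwanted", vocab))   # ['un', '##want', '##ed']
print(wordpiece_tokenize("unwantedX", vocab))  # ['[UNK]']
```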
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
_UpperCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
_UpperCamelCase = json.load(f)
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ,A ):
return FSMTTokenizer.from_pretrained(A )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = FSMTForConditionalGeneration.from_pretrained(A ).to(A )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def _UpperCamelCase ( self ,A ,A ):
        # note: this test does not measure peak performance, since it only evaluates a small batch,
        # but it should be enough to detect a regression in output quality
UpperCAmelCase = F'''facebook/wmt19-{pair}'''
UpperCAmelCase = self.get_tokenizer(A )
UpperCAmelCase = self.get_model(A )
UpperCAmelCase = bleu_data[pair]["""src"""]
UpperCAmelCase = bleu_data[pair]["""tgt"""]
UpperCAmelCase = tokenizer(A ,return_tensors="""pt""" ,truncation=A ,padding="""longest""" ).to(A )
UpperCAmelCase = model.generate(
input_ids=batch.input_ids ,num_beams=8 ,)
UpperCAmelCase = tokenizer.batch_decode(
A ,skip_special_tokens=A ,clean_up_tokenization_spaces=A )
UpperCAmelCase = calculate_bleu(A ,A )
print(A )
self.assertGreaterEqual(scores["""bleu"""] ,A )
| 234 | 1 |
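`calculate_bleu` is imported from the examples' local `utils` module. A minimal stand-in built on `sacrebleu` (assumed to be how that helper is implemented; treat the exact rounding as an assumption):

```python
import sacrebleu

def calculate_bleu(output_lns, refs_lns):
    # corpus-level BLEU with a single reference stream per hypothesis
    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}

print(calculate_bleu(["the cat sat on the mat"], ["the cat sat on the mat"]))
# {'bleu': 100.0}
```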
from statistics import mean
import numpy as np
def __UpperCamelCase ( _A , _A , _A , _A ):
lowerCAmelCase_ = 0
# Number of processes finished
lowerCAmelCase_ = 0
    # Tracks each process's completion status:
    # 1 means the process has finished, 0 means it has not yet run to completion.
lowerCAmelCase_ = [0] * no_of_process
# List to include calculation results
lowerCAmelCase_ = [0] * no_of_process
# Sort by arrival time.
lowerCAmelCase_ = [burst_time[i] for i in np.argsort(_A )]
lowerCAmelCase_ = [process_name[i] for i in np.argsort(_A )]
arrival_time.sort()
while no_of_process > finished_process_count:
lowerCAmelCase_ = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
lowerCAmelCase_ = arrival_time[i]
lowerCAmelCase_ = 0
# Index showing the location of the process being performed
lowerCAmelCase_ = 0
# Saves the current response ratio.
lowerCAmelCase_ = 0
for i in range(0 , _A ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
lowerCAmelCase_ = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
lowerCAmelCase_ = temp
lowerCAmelCase_ = i
# Calculate the turn around time
lowerCAmelCase_ = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
lowerCAmelCase_ = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __UpperCamelCase ( _A , _A , _A , _A ):
lowerCAmelCase_ = [0] * no_of_process
for i in range(0 , _A ):
lowerCAmelCase_ = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
_A = 5
_A = ['''A''', '''B''', '''C''', '''D''', '''E''']
_A = [1, 2, 3, 4, 5]
_A = [1, 2, 3, 4, 5]
_A = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_A = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
)
print(f"average waiting time : {mean(waiting_time):.5f}")
print(f"average turn around time : {mean(turn_around_time):.5f}")
| 278 |
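The scheduling loop above implements Highest Response Ratio Next: among the arrived, unfinished processes it picks the one maximizing (waiting_time + burst_time) / burst_time. A tiny worked example of that selection rule:

```python
current_time = 6
arrival = {"B": 2, "C": 3}
burst = {"B": 2, "C": 5}
for p in ("B", "C"):
    wait = current_time - arrival[p]
    ratio = (wait + burst[p]) / burst[p]
    print(p, ratio)  # B: (4 + 2) / 2 = 3.0, C: (3 + 5) / 5 = 1.6 -> B is scheduled next
```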
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
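A hedged usage note for the converter above; the script filename below is hypothetical, and the checkpoint download requires network access:

# Example invocation (the script filename is hypothetical):
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted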
| 278 | 1 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain the same value, because that would mean a vertical collision. Then we
        # apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
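A small sketch of the two diagonal formulas used above: row - col is constant along one diagonal direction and row + col along the other:

# Queens at (0, 1) and (2, 3) share a 45-degree diagonal: row - col matches.
assert 0 - 1 == 2 - 3
# Queens at (0, 3) and (1, 2) share a 135-degree diagonal: row + col matches.
assert 0 + 3 == 1 + 2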
| 301 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Union[str, Any]=9_9 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Union[str, Any]=3_7 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : List[str]=2_0 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : List[Any]=0 , ):
"""simple docstring"""
_A: Union[str, Any] = parent
_A: List[Any] = batch_size
_A: Dict = seq_length
_A: Dict = is_training
_A: str = use_labels
_A: int = vocab_size
_A: str = hidden_size
_A: Tuple = num_hidden_layers
_A: Optional[Any] = num_attention_heads
_A: Tuple = intermediate_size
_A: int = hidden_dropout_prob
_A: Tuple = attention_probs_dropout_prob
_A: Tuple = max_position_embeddings
_A: Dict = eos_token_id
_A: int = pad_token_id
_A: Any = bos_token_id
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_A: Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_A: List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
_A: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A: int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_A: Any = prepare_mbart_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: Tuple = TFMBartModel(config=lowerCAmelCase_ ).get_decoder()
_A: List[str] = inputs_dict['''input_ids''']
_A: Tuple = input_ids[:1, :]
_A: List[Any] = inputs_dict['''attention_mask'''][:1, :]
_A: str = inputs_dict['''head_mask''']
_A: Optional[Any] = 1
# first forward pass
_A: Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_A , _A: List[str] = outputs.to_tuple()
_A: Dict = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
def __magic_name__ ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Dict = TFMBartModelTester(self )
_A: Tuple = ConfigTester(self , config_class=lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
@cached_property
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __magic_name__ ( self : str ):
"""simple docstring"""
_A: Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __magic_name__ ( self : Union[str, Any] , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Optional[Any] = self.translate_src_text(**lowerCAmelCase_ )
self.assertListEqual(self.expected_text , lowerCAmelCase_ )
def __magic_name__ ( self : Dict , **lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Any = self.tokenizer(self.src_text , **lowerCAmelCase_ , return_tensors='''tf''' )
_A: Any = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
_A: Optional[Any] = self.tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
return generated_words
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
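For reference, a minimal translation sketch mirroring what the integration test above exercises; it downloads the full facebook/mbart-large-en-ro checkpoint, so it is illustrative rather than CI-friendly:

# Minimal sketch of the en->ro translation flow tested above (large download).
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tok = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
mdl = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
batch = tok([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
ids = mdl.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tok.batch_decode(ids, skip_special_tokens=True))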
| 301 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
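Since the demo above reads a local lena.jpg, here is a self-contained sketch that applies one kernel to synthetic data (it assumes scipy is available):

# Self-contained sketch: one Gabor kernel applied to random noise, no image file needed.
import numpy as np
from scipy.signal import convolve2d  # assumption: scipy is installed

noise = np.random.default_rng(0).random((64, 64))
kernel = gabor_filter_kernel(10, 8, 45, 10, 0, 0)
response = convolve2d(noise, kernel, mode="same")
print(response.shape)  # (64, 64)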
| 347 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
'''simple docstring'''
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =0
@slow
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(_lowerCAmelCase) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
            self.assertIsInstance(_lowerCAmelCase , (GPT2Tokenizer, GPT2TokenizerFast))
self.assertGreater(len(_lowerCAmelCase) , 0)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 2_0)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoConfig.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
# Check that tokenizer_type ≠ model_type
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert' , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2' , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert')
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2')
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
with pytest.raises(_lowerCAmelCase):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx')
@require_tokenizers
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowercase =tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
if isinstance(_lowerCAmelCase , _lowerCAmelCase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _lowerCAmelCase)
else:
self.assertEqual(tokenizer.do_lower_case , _lowerCAmelCase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
@require_tokenizers
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_lowerCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__lowercase =tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =TOKENIZER_MAPPING.values()
__lowercase =[]
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_lowerCAmelCase) , _lowerCAmelCase)
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased') , _lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_lowerCAmelCase)
__lowercase ='Hello, world. How are you?'
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
__lowercase =AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_lowerCAmelCase)
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
@require_tokenizers
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
self.assertEqual(type(_lowerCAmelCase) , _lowerCAmelCase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0)
self.assertEqual(tokenizer.unk_token , '[UNK]')
self.assertEqual(tokenizer.padding_side , 'right')
self.assertEqual(tokenizer.truncation_side , 'right')
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 1_2)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('ctrl')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =get_tokenizer_config('bert-base-cased')
__lowercase =config.pop('_commit_hash' , _lowerCAmelCase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_lowerCAmelCase , {'do_lower_case': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__lowercase =get_tokenizer_config(_lowerCAmelCase)
self.assertDictEqual(_lowerCAmelCase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =get_tokenizer_config(_lowerCAmelCase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer')
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase):
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
__lowercase =CustomTokenizer.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowerCAmelCase)
# Can register in two steps
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase):
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase =BertTokenizerFast.from_pretrained(_lowerCAmelCase)
bert_tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =CustomTokenizerFast.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase):
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase):
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
@require_tokenizers
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('custom' , _lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
# If remote code is not set, the default is to use local
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertTrue(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCAmelCase , 'bert-base is not a local folder and is not a valid model identifier'):
__lowercase =AutoTokenizer.from_pretrained('bert-base')
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , revision='aaaaaa')
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
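A condensed sketch of the registration round-trip the tests above exercise; CustomConfig and CustomTokenizer stand in for user-defined classes like the fixtures imported at the top of the file:

# Condensed sketch of the register/cleanup flow used in the tests above.
from transformers import AutoConfig, AutoTokenizer
from transformers.models.auto.configuration_auto import CONFIG_MAPPING

AutoConfig.register("custom", CustomConfig)  # map model_type -> config class
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# from_pretrained can now resolve a CustomConfig checkpoint to CustomTokenizer.
del CONFIG_MAPPING._extra_content["custom"]  # clean up, as the tests do in finally blocks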
| 166 | 0 |
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, txt, txt_tok):
        # mean-pool token embeddings under the attention mask, then project
        embs = self.transformer(input_ids=txt, attention_mask=txt_tok)[0]
        embs = (embs * txt_tok.unsqueeze(2)).sum(dim=1) / txt_tok.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
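The forward pass above mean-pools token embeddings under the attention mask before the linear projection; a small sketch of just that pooling step:

# Sketch of the masked mean pooling used in forward() above.
import torch

embs = torch.randn(2, 5, 1024)                           # (batch, seq, hidden)
mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])  # attention mask
pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 1024])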
| 321 | """simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # heapify from the last parent down to the root
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            left = self.get_left_child_idx(idx)
            right = self.get_right_child_idx(idx)
            smallest = idx
            if left < len(array) and array[left] < array[idx]:
                smallest = left
            if right < len(array) and array[right] < array[smallest]:
                smallest = right
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(p)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        top = self.heap.pop()
        del self.idx_of_element[top]
        self.sift_down(0, self.heap)
        return top

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new_value must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
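A quick check on the decrease_key behavior demonstrated above: after lowering B to -17 it should surface at the root:

# Sketch: after decrease_key(b, -17), node "B" sits at the heap root.
assert my_min_heap.peek().name == "B"
assert my_min_heap["B"] == -17  # __getitem__ resolves names through heap_dict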
| 321 | 1 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of a child's prefix to the child node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        # Compute the common substring of the node prefix and the word
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0):
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
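A short sketch of the prefix-splitting behavior implemented by the insert cases above:

# Sketch: inserting "banana" then "band" splits their shared prefix "ban".
demo_root = RadixNode()
demo_root.insert_many(["banana", "band"])
child = demo_root.nodes["b"]
print(child.prefix)  # "ban" -- the common prefix became an internal node
print(sorted(n.prefix for n in child.nodes.values()))  # ['ana', 'd']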
| 51 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def _a (self ):
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : List[str] = None
if self.use_input_mask:
A_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
A_ : int = None
if self.use_token_type_ids:
A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Tuple = None
A_ : List[str] = None
A_ : List[str] = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : str = ids_tensor([self.batch_size] , self.num_choices )
A_ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a (self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : List[Any] = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : List[Any] = model(lowercase , attention_mask=lowercase )
A_ : str = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ):
A_ : List[Any] = BioGptForCausalLM(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ):
A_ : Optional[Any] = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
# create attention mask
A_ : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
A_ : Optional[Any] = self.seq_length // 2
A_ : List[Any] = 0
# first forward pass
A_, A_ : List[str] = model(lowercase , attention_mask=lowercase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A_ : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A_ : Union[str, Any] = ids_tensor((1,) , lowercase ).item() + 1
A_ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A_ : Optional[int] = random_other_next_tokens
# append to next input_ids and attn_mask
A_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : Any = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase )] , dim=1 , )
# get two different outputs
A_ : List[Any] = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""]
A_ : Optional[int] = model(lowercase , past_key_values=lowercase , attention_mask=lowercase )["""last_hidden_state"""]
# select random slice
A_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : int = output_from_no_past[:, -1, random_slice_idx].detach()
A_ : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ):
A_ : Optional[int] = BioGptModel(config=lowercase ).to(lowercase ).eval()
A_ : Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
# first forward pass
A_ : int = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
A_, A_ : int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A_ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ : str = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A_ : List[str] = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""]
A_ : Tuple = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[
"""last_hidden_state"""
]
# select random slice
A_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ):
A_ : Union[str, Any] = BioGptForCausalLM(lowercase )
model.to(lowercase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A_ : Dict = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _a (self , lowercase , *lowercase ):
A_ : Union[str, Any] = BioGptModel(lowercase )
A_ : str = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ):
A_ : Union[str, Any] = self.num_labels
A_ : Optional[int] = BioGptForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a (self ):
A_ : List[Any] = self.prepare_config_and_inputs()
(
(
A_
), (
A_
), (
A_
), (
A_
), (
A_
), (
A_
), (
A_
),
) : Dict = config_and_inputs
A_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
def _a (self ):
A_ : Tuple = BioGptModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : int = type
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase )
def _a (self ):
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase , gradient_checkpointing=lowercase )
def _a (self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase )
def _a (self ):
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase )
def _a (self ):
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase )
@slow
def _a (self ):
A_ : str = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(lowercase )
A_ : List[str] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A_ : List[str] = """left"""
        # Define PAD Token = EOS Token (BioGPT has no dedicated padding token)
A_ : Any = tokenizer.eos_token
A_ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A_ : List[Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A_ : List[str] = tokenizer(lowercase , return_tensors="""pt""" , padding=lowercase )
A_ : List[str] = inputs["""input_ids"""].to(lowercase )
A_ : List[Any] = model.generate(
input_ids=lowercase , attention_mask=inputs["""attention_mask"""].to(lowercase ) , )
A_ : List[str] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(lowercase )
A_ : List[Any] = model.generate(input_ids=lowercase )
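        # Number of pad positions that left-padding added to the shorter sentence in the batch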
A_ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A_ : Union[str, Any] = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(lowercase )
A_ : Any = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
A_ : List[str] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
A_ : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
A_ : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
A_ : Union[str, Any] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
@slow
def _a (self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = BioGptModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def _a (self ):
A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = 3
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(lowercase )
A_ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A_ : Union[str, Any] = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Tuple = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a (self ):
A_, A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = 3
A_ : Dict = """multi_label_classification"""
A_ : List[Any] = input_dict["""input_ids"""]
A_ : Tuple = input_ids.ne(1 ).to(lowercase )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Dict = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : int = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
A_ : Dict = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A_ : Optional[Any] = torch.tensor([[2, 4805, 9, 656, 21]] )
A_ : Dict = model(lowercase )[0]
A_ : Any = 42384
A_ : Union[str, Any] = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowercase )
A_ : Union[str, Any] = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
@slow
def _a (self ):
A_ : Union[str, Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A_ : Dict = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(lowercase )
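        # Seed the RNG so the generated text is reproducible across runs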
torch.manual_seed(0 )
A_ : Dict = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(lowercase )
A_ : Optional[int] = model.generate(
**lowercase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=lowercase , )
A_ : Union[str, Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase )
A_ : Any = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(lowercase , lowercase ) | 206 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_UpperCamelCase = get_tests_dir('''fixtures''')
class _A ( unittest.TestCase ):
def __A ( self ) -> List[str]:
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
__UpperCAmelCase : Optional[Any] = mock.Mock()
__UpperCAmelCase : Any = 500
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : int = HTTPError
__UpperCAmelCase : Tuple = {}
# Download this model to make sure it's in the cache.
__UpperCAmelCase : str = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=_SCREAMING_SNAKE_CASE ) as mock_head:
__UpperCAmelCase : Any = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
            # This checks that the fake head request was indeed called
mock_head.assert_called()
def __A ( self ) -> Dict:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
__UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def __A ( self ) -> str:
'''simple docstring'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# config is in subfolder, the following should not work without specifying the subfolder
__UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
__UpperCAmelCase : int = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@is_staging_test
class _A ( unittest.TestCase ):
@classmethod
def __A ( cls ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def __A ( cls ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = ViTImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
__UpperCAmelCase : str = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id="""test-image-processor""" , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
__UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : int = ViTImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
__UpperCAmelCase : Optional[Any] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
__UpperCAmelCase : Dict = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> Dict:
'''simple docstring'''
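        # register_for_auto_class() is what makes save_pretrained/push_to_hub write the
        # auto_map entry, so AutoImageProcessor can later resolve the custom class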
CustomImageProcessor.register_for_auto_class()
__UpperCAmelCase : List[Any] = CustomImageProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
__UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained(
f'{USER}/test-dynamic-image-processor' , trust_remote_code=_SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 363 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
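# Each optional backend below is probed at import time; its classes are added to the
# lazy import structure only when the corresponding framework is installed.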
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 16 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( __a ):
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Any=7 , _A : str=True , _A : Any=True , _A : Any=True , _A : Optional[int]=True , _A : int=99 , _A : Optional[int]=32 , _A : List[Any]=5 , _A : Optional[Any]=4 , _A : Dict=37 , _A : Any="gelu" , _A : str=0.1 , _A : int=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=16 , _A : List[Any]=2 , _A : str=0.0_2 , _A : Optional[Any]=False , _A : Any=True , _A : Dict="None" , _A : List[str]=3 , _A : List[str]=4 , _A : Tuple=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Dict = is_training
UpperCAmelCase__ : Optional[Any] = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Tuple = hidden_size
UpperCAmelCase__ : Any = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : Any = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : Optional[int] = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : Any = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : List[Any] = relative_attention
UpperCAmelCase__ : int = position_biased_input
UpperCAmelCase__ : str = pos_att_type
UpperCAmelCase__ : Union[str, Any] = scope
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase__ : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase_ ( self : Dict , _A : Optional[int] ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase_ ( self : int , _A : int , _A : Any , _A : Tuple , _A : List[Any] , _A : str , _A : Union[str, Any] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DebertaVaModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : str = model(_A , attention_mask=_A , token_type_ids=_A )[0]
UpperCAmelCase__ : List[str] = model(_A , token_type_ids=_A )[0]
UpperCAmelCase__ : List[Any] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : List[Any] , _A : Optional[Any] , _A : int , _A : List[Any] , _A : Optional[int] , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = DebertaVaForMaskedLM(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : str , _A : str , _A : Any , _A : Any , _A : List[Any] , _A : Dict , _A : Tuple , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = DebertaVaForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : str = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def lowercase_ ( self : Any , _A : List[str] , _A : List[str] , _A : Optional[int] , _A : Tuple , _A : Dict , _A : List[str] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = DebertaVaForTokenClassification(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : str , _A : List[str] , _A : str , _A : Optional[int] , _A : Optional[int] , _A : Union[str, Any] , _A : Dict , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = DebertaVaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : int = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Any , _A : Tuple , _A : Optional[int] , _A : Optional[int] , _A : str , _A : List[str] , _A : Any , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DebertaVaForMultipleChoice(config=_A )
model.to(_A )
model.eval()
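        # Tile every input to (batch_size, num_choices, seq_len) so each choice is scored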
UpperCAmelCase__ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : List[str] = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Any = config_and_inputs
UpperCAmelCase__ : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DebertaVaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = DebertaVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
UpperCAmelCase__ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
UpperCAmelCase__ : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 181 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
UpperCamelCase__ = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column  Type                   Description
1       Document ID            This is a variation on the document filename
2       Part number            Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3       Word number
4       Word itself            This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5       Part-of-Speech
6       Parse bit              This is the bracketed structure broken before the first open parenthesis in the parse, with the word/part-of-speech leaf replaced by a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7       Predicate lemma        The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8       Predicate Frameset ID  This is the PropBank frameset ID of the predicate in Column 7.
9       Word sense             This is the word sense of the word in Column 3.
10      Speaker/Author         This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11      Named Entities         These columns identify the spans representing various named entities.
12:N    Predicate Arguments    There is one column of predicate argument structure information for each predicate mentioned in Column 7.
N       Coreference            Coreference chain information encoded in a parenthesis structure.
More information about the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite were added by @andreasvc.
Parsing of CoNLL files was developed by Leo Born.
'''
UpperCamelCase__ = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation).
See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation).
See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions from the key or system files,
        mentions whose corresponding coreference chain is of size one
        are considered singletons. The default evaluation mode includes
        singletons in the evaluation if they appear in the key or the system files.
        By setting \'keep_singletons=False\', all singletons in the key and system files
        will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="dummy_doc" ) -> Dict:
UpperCAmelCase__ : List[str] = {doc: key_lines}
UpperCAmelCase__ : int = {doc: sys_lines}
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Union[str, Any] = 0
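    # Extract mention clusters from the key (gold) and system annotations; singleton counts are tracked separately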
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = reader.get_doc_mentions(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase__ : int = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ : str = reader.get_doc_mentions(lowerCAmelCase__ , sys_doc_lines[doc] , lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase__ : str = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
if remove_nested:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
UpperCAmelCase__ : Dict = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : str = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'''Number of resulting singleton clusters in the key '''
F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'''files, respectively''' )
return doc_coref_infos
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
UpperCAmelCase__ : str = get_coref_infos(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Optional[int] = 0
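    # The CoNLL score is the average F1 of the MUC, B-cubed and CEAFe metrics
    # (accumulated below and averaged once all three sub-metrics have been seen)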
for name, metric in metrics:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = evaluator.evaluate_documents(lowerCAmelCase__ , lowerCAmelCase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , F"""Recall: {recall * 1_00:.2f}""" , F""" Precision: {precision * 1_00:.2f}""" , F""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
UpperCAmelCase__ : Any = (conll / 3) * 1_00
logger.info(F"""CoNLL score: {conll:.2f}""" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : int = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
UpperCAmelCase__ : str = line.split()[5]
if not parse_col == "-":
UpperCAmelCase__ : Tuple = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def lowercase_ ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Tuple , _A : Dict=True , _A : Optional[int]=False , _A : str=False , _A : List[str]=False ):
'''simple docstring'''
UpperCAmelCase__ : Any = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
UpperCAmelCase__ : int = util.check_gold_parse_annotation(_A )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
UpperCAmelCase__ : List[str] = evaluate(
key_lines=_A , sys_lines=_A , metrics=_A , NP_only=_A , remove_nested=_A , keep_singletons=_A , min_span=_A , )
return score
| 181 | 1 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
__snake_case = namedtuple('''covid_data''', '''cases deaths recovered''')
def a ( __a = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
'''simple docstring'''
UpperCamelCase__ :Tuple = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(__a ).content ).xpath(__a ) )
__snake_case = '''Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats())) | 370 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 16
__snake_case = 32
def a ( __a , __a = 16 , __a = "bert-base-cased" ) -> Any:
'''simple docstring'''
UpperCamelCase__ :List[str] = AutoTokenizer.from_pretrained(__a )
UpperCamelCase__ :List[Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :Tuple = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase__ :Optional[int] = datasets.map(
__a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=__a )
    # We also rename the 'label' column to 'labels', which is the name the models of the
    # transformers library expect for the labels
UpperCamelCase__ :Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(__a , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCamelCase__ :Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
UpperCamelCase__ :str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
def a ( __a , __a , __a , __a ) -> str:
'''simple docstring'''
model.eval()
UpperCamelCase__ :List[str] = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ :int = model(**__a )
UpperCamelCase__ :Tuple = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
UpperCamelCase__ , UpperCamelCase__ :int = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__a ) - 1:
UpperCamelCase__ :Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase__ :List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__a , references=__a , )
UpperCamelCase__ :Union[str, Any] = metric.compute()
return eval_metric["accuracy"]
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ :Any = config['''lr''']
UpperCamelCase__ :Optional[int] = int(config['''num_epochs'''] )
UpperCamelCase__ :List[Any] = int(config['''seed'''] )
UpperCamelCase__ :List[Any] = int(config['''batch_size'''] )
UpperCamelCase__ :List[Any] = args.model_name_or_path
set_seed(__a )
UpperCamelCase__ , UpperCamelCase__ :Any = get_dataloaders(__a , __a , __a )
    # Instantiate the model (we build the model here so that the seed also controls the new weight initialization)
UpperCamelCase__ :Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__a , return_dict=__a )
# Instantiate optimizer
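    # When the DeepSpeed config supplies its own optimizer, pass accelerate a DummyOptim
    # placeholder instead of a real AdamW so DeepSpeed can build the optimizer itself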
UpperCamelCase__ :Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase__ :Optional[Any] = optimizer_cls(params=model.parameters() , lr=__a )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase__ :Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCamelCase__ :Dict = 1
UpperCamelCase__ :Tuple = (len(__a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase__ :Any = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=0 , num_training_steps=__a , )
else:
UpperCamelCase__ :Any = DummyScheduler(__a , total_num_steps=__a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = accelerator.prepare(
__a , __a , __a , __a , __a )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase__ :Tuple = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Optional[int] = evaluate.load('''glue''' , '''mrpc''' )
UpperCamelCase__ :List[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase__ :Optional[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
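        # Recover the epoch number from a checkpoint directory named like "epoch_<n>"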
UpperCamelCase__ :Dict = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCamelCase__ :Tuple = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase__ :Any = int(__a ) + 1
UpperCamelCase__ :Dict = evaluation_loop(__a , __a , __a , __a )
accelerator.print('''resumed checkpoint performance:''' , __a )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.load(__a )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase__ :Optional[Any] = {}
for epoch in range(__a , __a ):
model.train()
for step, batch in enumerate(__a ):
UpperCamelCase__ :Optional[int] = model(**__a )
UpperCamelCase__ :Optional[int] = outputs.loss
UpperCamelCase__ :str = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCamelCase__ :Union[str, Any] = f'''epoch_{epoch}'''
UpperCamelCase__ :List[Any] = os.path.join(args.output_dir , __a )
accelerator.save_state(__a )
UpperCamelCase__ :List[Any] = evaluation_loop(__a , __a , __a , __a )
UpperCamelCase__ :int = accuracy
UpperCamelCase__ :List[Any] = lr_scheduler.get_lr()[0]
UpperCamelCase__ :Any = optimizer.param_groups[0]['''lr''']
UpperCamelCase__ :int = epoch
UpperCamelCase__ :Tuple = overall_step
accelerator.print(f'''epoch {epoch}:''' , __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(__a , __a )
def a ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :List[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=__a , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__a , )
parser.add_argument(
'''--output_dir''' , type=__a , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=__a , default=__a , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=__a , default=__a , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=__a , default=2 , help='''Number of train epochs.''' , )
UpperCamelCase__ :Optional[int] = parser.parse_args()
UpperCamelCase__ :List[str] = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__a , __a )
if __name__ == "__main__":
main() | 219 | 0 |
"""simple docstring"""
import torch
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
if torch.cuda.is_available():
a : List[Any] = torch.cuda.device_count()
else:
a : Any = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 105 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] =ComputeEnvironment.AMAZON_SAGEMAKER
lowerCamelCase : str =True
lowerCamelCase : Union[str, Any] ="""ml.p3.2xlarge"""
lowerCamelCase : str ="""accelerate_sagemaker_execution_role"""
lowerCamelCase : int ="""hf-sm"""
lowerCamelCase : int ="""us-east-1"""
lowerCamelCase : Tuple =1
lowerCamelCase : Any ="""accelerate-sagemaker-1"""
lowerCamelCase : str ="""1.6"""
lowerCamelCase : Tuple ="""4.4"""
lowerCamelCase : Optional[int] ="""train.py"""
lowerCamelCase : Optional[Any] =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
lowerCamelCase : Union[str, Any] =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> List[str]:
        # The nargs-style training script args should parse into a dict with properly typed values.
a : str = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"] , lowerCAmelCase__ )
assert isinstance(converted_args["do_train"] , lowerCAmelCase__ )
assert isinstance(converted_args["epochs"] , lowerCAmelCase__ )
assert isinstance(converted_args["learning_rate"] , lowerCAmelCase__ )
assert isinstance(converted_args["max_steps"] , lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 105 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : Optional[int] = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = AlbertTokenizer
UpperCamelCase = AlbertTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Union[str, Any] = AlbertTokenizer(__A )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : List[Any], __A : Any ):
UpperCAmelCase : Tuple = '''this is a test'''
UpperCAmelCase : Optional[int] = '''this is a test'''
return input_text, output_text
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Optional[Any] = '''<pad>'''
UpperCAmelCase : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ), __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ), __A )
def __magic_name__ ( self : Any ):
UpperCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '''<pad>''' )
self.assertEqual(vocab_keys[1], '''<unk>''' )
self.assertEqual(vocab_keys[-1], '''▁eloquent''' )
self.assertEqual(len(__A ), 3_0_0_0_0 )
def __magic_name__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size, 3_0_0_0_0 )
def __magic_name__ ( self : Dict ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase : Tuple = self.get_tokenizer()
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : int = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase : int = tokenizer.tokenize(__A )
UpperCAmelCase : Dict = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A, __A )
UpperCAmelCase : List[str] = tokenizer.encode(__A, add_special_tokens=__A )
UpperCAmelCase : List[str] = rust_tokenizer.encode(__A, add_special_tokens=__A )
self.assertListEqual(__A, __A )
UpperCAmelCase : str = self.get_rust_tokenizer()
UpperCAmelCase : List[Any] = tokenizer.encode(__A )
UpperCAmelCase : Dict = rust_tokenizer.encode(__A )
self.assertListEqual(__A, __A )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Dict = AlbertTokenizer(__A, keep_accents=__A )
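        # SentencePiece marks word-initial pieces with the "▁" (U+2581) prefix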
UpperCAmelCase : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__A, ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [4_8, 2_5, 2_1, 1_2_8_9] )
UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__A, ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
UpperCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A, [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A, ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''], )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = AlbertTokenizer(__A )
UpperCAmelCase : Union[str, Any] = tokenizer.encode('''sequence builders''' )
UpperCAmelCase : Tuple = tokenizer.encode('''multi-sequence build''' )
UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__A )
UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(__A, __A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __magic_name__ ( self : Dict ):
# fmt: off
UpperCAmelCase : List[Any] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A, model_name='''albert-base-v2''', revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''', )
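# Hedged sketch of the invariant checked by the pair-encoding assert earlier
# in this test class: BERT-style pair encodings are laid out as
# [CLS] + ids_a + [SEP] + ids_b + [SEP]. `build_inputs_with_special_tokens`
# is the public tokenizer hook for this; the model name is taken from the
# test above, everything else here is illustrative, not the test harness.
from transformers import AutoTokenizer

def check_pair_layout(model_name: str = "albert-base-v2") -> None:
    tok = AutoTokenizer.from_pretrained(model_name)
    ids_a = tok.encode("first sequence", add_special_tokens=False)
    ids_b = tok.encode("second sequence", add_special_tokens=False)
    pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)
    assert pair == [tok.cls_token_id] + ids_a + [tok.sep_token_id] + ids_b + [tok.sep_token_id]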
| 99 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
_lowerCamelCase : List[str] = "bert-base-cased"
_lowerCamelCase : str = "fp16"
_lowerCamelCase : Optional[int] = "bf16"
_lowerCamelCase : List[str] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Tuple ):
super().setUp()
self.dist_env = dict(
ACCELERATE_USE_FSDP='''true''', MASTER_ADDR='''localhost''', MASTER_PORT='''10999''', RANK='''0''', LOCAL_RANK='''0''', WORLD_SIZE='''1''', )
def __magic_name__ ( self : int ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
env = self.dist_env.copy()
# env var names below are reconstructed from context; verify against
# accelerate's FSDP constants for the release under test
env["FSDP_SHARDING_STRATEGY"] = F'''{i + 1}'''
env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
with mockenv_context(**env ):
UpperCAmelCase : Union[str, Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1 ) )
def __magic_name__ ( self : Dict ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH ):
env = self.dist_env.copy()
env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
with mockenv_context(**env ):
UpperCAmelCase : Optional[int] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1 ) )
def __magic_name__ ( self : Any ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE ):
env = self.dist_env.copy()
env["FSDP_STATE_DICT_TYPE"] = state_dict_type
with mockenv_context(**env ):
UpperCAmelCase : str = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def __magic_name__ ( self : int ):
model = AutoModel.from_pretrained(BERT_BASE_CASED )
for policy in FSDP_AUTO_WRAP_POLICY:
env = self.dist_env.copy()
env["FSDP_AUTO_WRAP_POLICY"] = policy
if policy == "TRANSFORMER_BASED_WRAP":
    env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
    env["FSDP_MIN_NUM_PARAMS"] = '''2000'''
with mockenv_context(**env ):
UpperCAmelCase : Optional[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(model )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
env = self.dist_env.copy()
env["FSDP_AUTO_WRAP_POLICY"] = '''TRANSFORMER_BASED_WRAP'''
env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = '''T5Layer'''
with mockenv_context(**env ):
UpperCAmelCase : int = FullyShardedDataParallelPlugin()
with self.assertRaises(__A ) as cm:
fsdp_plugin.set_auto_wrap_policy(model )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
env = self.dist_env.copy()
env["FSDP_AUTO_WRAP_POLICY"] = '''SIZE_BASED_WRAP'''
env["FSDP_MIN_NUM_PARAMS"] = '''0'''
with mockenv_context(**env ):
UpperCAmelCase : Optional[int] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(model )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __magic_name__ ( self : int ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
env = self.dist_env.copy()
env["ACCELERATE_MIXED_PRECISION"] = mp_dtype  # env var name assumed from accelerate conventions
with mockenv_context(**env ):
UpperCAmelCase : int = Accelerator()
if mp_dtype == "fp16":
UpperCAmelCase : Any = torch.floataa
elif mp_dtype == "bf16":
UpperCAmelCase : Any = torch.bfloataa
UpperCAmelCase : Optional[Any] = MixedPrecision(param_dtype=__A, reduce_dtype=__A, buffer_dtype=__A )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, __A )
if mp_dtype == FP16:
    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler ) )
elif mp_dtype == BF16:
    self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__A )
def __magic_name__ ( self : Optional[int] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
env = self.dist_env.copy()
env["FSDP_OFFLOAD_PARAMS"] = str(flag ).lower()
with mockenv_context(**env ):
UpperCAmelCase : Union[str, Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag ) )
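# Minimal sketch of the pattern the unit tests above rely on: accelerate's
# FSDP plugin reads its settings from environment variables at construction
# time, so tests mock the environment and then instantiate the plugin. The
# variable names here are assumptions based on the test code above, not
# verified against a specific accelerate release.
import os
from unittest import mock

fsdp_env = {
    "ACCELERATE_USE_FSDP": "true",
    "FSDP_SHARDING_STRATEGY": "1",  # 1 == FULL_SHARD
}
with mock.patch.dict(os.environ, fsdp_env):
    # In the real tests: fsdp_plugin = FullyShardedDataParallelPlugin()
    assert os.environ["FSDP_SHARDING_STRATEGY"] == "1"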
@require_fsdp
@require_multi_gpu
@slow
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : List[Any] ):
super().setUp()
self.performance_lower_bound = 0.82
self.performance_configs = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
self.peak_memory_usage_upper_bound = {
'''multi_gpu_fp16''': 3_2_0_0,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_0_0_0,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1_9_0_0,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
self.n_train = 1_6_0
self.n_val = 1_6_0
mod_file = inspect.getfile(accelerate.test_utils )
self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def __magic_name__ ( self : str ):
self.test_file_path = os.path.join(self.test_scripts_folder, '''test_performance.py''' )
cmd = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
cmd_config = cmd.copy()
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config, env=os.environ.copy() )
def __magic_name__ ( self : Tuple ):
self.test_file_path = os.path.join(self.test_scripts_folder, '''test_checkpointing.py''' )
cmd = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
cmd_config = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
state_dict_config_index = len(cmd_config )
for state_dict_type in FSDP_STATE_DICT_TYPE:
cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config, env=os.environ.copy() )
cmd_config = cmd_config[:-1]
resume_from_checkpoint = os.path.join(self.tmpdir, '''epoch_0''' )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config, env=os.environ.copy() )
def __magic_name__ ( self : Any ):
self.test_file_path = os.path.join(self.test_scripts_folder, '''test_peak_memory_usage.py''' )
cmd = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config, env=os.environ.copy() )
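# Standalone sketch of the command-assembly pattern used by the e2e tests
# above: start from a base `accelerate launch` invocation and append FSDP
# flags per configuration. Flag names are copied from the tests themselves;
# the script path and output dir are placeholders.
base_cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
fsdp_cmd = base_cmd + [
    "--fsdp_sharding_strategy=1",
    "--mixed_precision=fp16",
    "--fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP",
    "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
    "my_training_script.py",  # placeholder script path
    "--output_dir=/tmp/fsdp_run",  # placeholder output dir
]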
| 99 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
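# The try/except above is the standard optional-dependency guard used across
# diffusers/transformers __init__ files. A minimal standalone version of the
# same idea (names here are illustrative, not the library's helpers):
try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

def require_torch() -> None:
    # Raise a helpful error only when the optional feature is actually used.
    if not _torch_available:
        raise ImportError("This pipeline requires `torch`; install it with `pip install torch`.")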
| 134 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int]=13 , lowerCAmelCase_ : Optional[int]=7 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=99 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Dict=64 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : str=5_12 , lowerCAmelCase_ : Optional[Any]=16 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Union[str, Any]=1 , ) -> List[Any]:
'''simple docstring'''
A__ : Dict =parent
A__ : Optional[int] =batch_size
A__ : List[Any] =seq_length
A__ : Any =is_training
A__ : List[str] =use_input_mask
A__ : str =use_token_type_ids
A__ : Tuple =use_labels
A__ : Tuple =vocab_size
A__ : Optional[Any] =hidden_size
A__ : Dict =num_hidden_layers
A__ : str =num_attention_heads
A__ : int =intermediate_size
A__ : Union[str, Any] =hidden_act
A__ : List[Any] =hidden_dropout_prob
A__ : Union[str, Any] =attention_probs_dropout_prob
A__ : Dict =max_position_embeddings
A__ : Any =type_vocab_size
A__ : Any =type_sequence_label_size
A__ : int =initializer_range
A__ : str =num_labels
A__ : Optional[int] =num_choices
A__ : Optional[int] =scope
A__ : List[str] =q_groups
A__ : Dict =k_groups
A__ : Any =v_groups
A__ : Optional[Any] =post_attention_groups
A__ : Optional[int] =intermediate_groups
A__ : Optional[int] =output_groups
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[int] =None
if self.use_input_mask:
A__ : str =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] =None
A__ : Tuple =None
A__ : Dict =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : int =ids_tensor([self.batch_size] , self.num_choices )
A__ : str =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =SqueezeBertModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Dict =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =SqueezeBertForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
A__ : str =SqueezeBertForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
A__ : Dict =self.num_labels
A__ : int =SqueezeBertForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : str =self.num_labels
A__ : int =SqueezeBertForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.num_choices
A__ : Dict =SqueezeBertForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Any =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Optional[Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
A__ : Any =self.prepare_config_and_inputs()
((A__) , (A__) , (A__) , (A__) , (A__) , (A__)) : Any =config_and_inputs
A__ : str ={"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = True
__snake_case = False
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
A__ : Optional[Any] =SqueezeBertModelTester(self )
A__ : int =ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
A__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
A__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCAmelCase_ )
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCAmelCase_ )
@slow
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : int =SqueezeBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
A__ : List[str] =SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
A__ : List[str] =torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
A__ : Tuple =model(lowerCAmelCase_ )[0]
A__ : Union[str, Any] =torch.Size((1, 3) )
self.assertEqual(output.shape , lowerCAmelCase_ )
A__ : Tuple =torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
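# The integration test above pins model outputs numerically. The core
# pattern, shown standalone with made-up tensors (these are NOT SqueezeBert
# outputs):
import torch

expected = torch.tensor([[0.6401, -0.0349, -0.6041]])
actual = expected + 1e-5  # stand-in for a real forward pass
assert actual.shape == torch.Size((1, 3))
assert torch.allclose(actual, expected, atol=1e-4)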
| 134 | 1 |
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune hidden/underscore dirs and "scripts" in place so os.walk skips them.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 330 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 330 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_electra"""] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_electra"""] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_electra"""] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
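# Standalone sketch of the lazy-import idea behind _LazyModule above: expose
# names up front, import the owning submodule only on first attribute access.
# Simplified illustration, not the transformers implementation.
import importlib

class LazyNamespace:
    def __init__(self, name_to_module: dict):
        # e.g. {"ElectraModel": "transformers.models.electra.modeling_electra"}
        self._name_to_module = name_to_module

    def __getattr__(self, name: str):
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)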
| 52 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude and direction into (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check that the net moment of the forces about their locations is ~zero."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
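# Quick worked example of polar_force: a 10 N force at 60 degrees decomposes
# into (10*cos 60, 10*sin 60) = (5.0, ~8.66).
from numpy import cos, radians, sin

fx, fy = 10 * cos(radians(60)), 10 * sin(radians(60))
assert abs(fx - 5.0) < 1e-9
assert abs(fy - 8.6602540378) < 1e-9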
| 170 | 0 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
_A : Union[str, Any] =Mapping[str, np.ndarray]
_A : Tuple =Mapping[str, Any] # Is a nested dict.
_A : Union[str, Any] =0.01
@dataclasses.dataclass(frozen=True )
class Protein:
atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
aatype: np.ndarray  # [num_res]

# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
atom_mask: np.ndarray  # [num_res, num_atom_type]

# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
residue_index: np.ndarray  # [num_res]

# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
b_factors: np.ndarray  # [num_res, num_atom_type]

# Chain indices for multi-chain predictions
chain_index: Optional[np.ndarray] = None

# Optional remark about the protein. Included as a comment in output PDB
# files
remark: Optional[str] = None

# Templates used to generate this protein (prediction-only)
parents: Optional[Sequence[str]] = None

# Chain corresponding to each parent
parents_chain_index: Optional[Sequence[int]] = None
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Protein:
lowerCamelCase__ : Optional[Any] = r"""(\[[A-Z]+\]\n)"""
lowerCamelCase__ : List[str] = [tag.strip() for tag in re.split(UpperCamelCase , UpperCamelCase ) if len(UpperCamelCase ) > 0]
lowerCamelCase__ : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] )
lowerCamelCase__ : List[str] = ["N", "CA", "C"]
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Any = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowerCamelCase__ : List[Any] = g[1][0].strip()
for i in range(len(UpperCamelCase ) ):
if seq[i] not in residue_constants.restypes:
lowerCamelCase__ : Any = """X""" # FIXME: strings are immutable
lowerCamelCase__ : Dict = np.array(
[residue_constants.restype_order.get(UpperCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowerCamelCase__ : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(UpperCamelCase , g[1][axis].split() ) ) )
lowerCamelCase__ : Optional[int] = np.array(UpperCamelCase )
lowerCamelCase__ : List[Any] = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(UpperCamelCase ):
lowerCamelCase__ : Tuple = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowerCamelCase__ : Dict = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) )
lowerCamelCase__ : List[Any] = np.zeros(
(
len(UpperCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(UpperCamelCase ):
lowerCamelCase__ : int = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=UpperCamelCase , atom_mask=UpperCamelCase , aatype=UpperCamelCase , residue_index=np.arange(len(UpperCamelCase ) ) , b_factors=UpperCamelCase , )
def get_pdb_headers(prot: Protein , chain_id: int = 0 ) -> List[str]:
    pdb_headers: List[str] = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f'''REMARK {remark}''' )
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index , parents ) if i == chain_id]
    if parents is None or len(parents ) == 0:
        parents = ['''N/A''']
    pdb_headers.append(f'''PARENT {' '.join(parents )}''' )
    return pdb_headers
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> str:
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Dict = pdb_str.split("""\n""" )
lowerCamelCase__ : int = prot.remark
if remark is not None:
out_pdb_lines.append(f'''REMARK {remark}''' )
lowerCamelCase__ : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
lowerCamelCase__ : List[Any] = []
if prot.parents_chain_index is not None:
lowerCamelCase__ : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(UpperCamelCase ) , [] )
parent_dict[str(UpperCamelCase )].append(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = max([int(UpperCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowerCamelCase__ : Optional[int] = parent_dict.get(str(UpperCamelCase ) , ["""N/A"""] )
parents_per_chain.append(UpperCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowerCamelCase__ : List[str] = [["""N/A"""]]
def make_parent_line(UpperCamelCase ) -> str:
return f'''PARENT {' '.join(UpperCamelCase )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowerCamelCase__ : Tuple = 0
for i, l in enumerate(UpperCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(UpperCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(UpperCamelCase ):
lowerCamelCase__ : Optional[Any] = parents_per_chain[chain_counter]
else:
lowerCamelCase__ : List[str] = ["""N/A"""]
out_pdb_lines.append(make_parent_line(UpperCamelCase ) )
return "\n".join(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
lowerCamelCase__ : Union[str, Any] = residue_constants.restypes + ["""X"""]
def res_atoa(UpperCamelCase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , """UNK""" )
lowerCamelCase__ : Union[str, Any] = residue_constants.atom_types
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = prot.atom_mask
lowerCamelCase__ : List[str] = prot.aatype
lowerCamelCase__ : Optional[Any] = prot.atom_positions
lowerCamelCase__ : Tuple = prot.residue_index.astype(np.intaa )
lowerCamelCase__ : Union[str, Any] = prot.b_factors
lowerCamelCase__ : List[str] = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
lowerCamelCase__ : Dict = get_pdb_headers(UpperCamelCase )
if len(UpperCamelCase ) > 0:
pdb_lines.extend(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = aatype.shape[0]
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : Union[str, Any] = string.ascii_uppercase
lowerCamelCase__ : Dict = None
# Add all atom sites.
for i in range(UpperCamelCase ):
lowerCamelCase__ : List[str] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(UpperCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowerCamelCase__ : Any = """ATOM"""
lowerCamelCase__ : Union[str, Any] = atom_name if len(UpperCamelCase ) == 4 else f''' {atom_name}'''
lowerCamelCase__ : Optional[Any] = """"""
lowerCamelCase__ : Optional[int] = """"""
lowerCamelCase__ : Any = 1.00
lowerCamelCase__ : Union[str, Any] = atom_name[0] # Protein supports only C, N, O, S, this works.
lowerCamelCase__ : Any = """"""
lowerCamelCase__ : Dict = """A"""
if chain_index is not None:
lowerCamelCase__ : Optional[int] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowerCamelCase__ : Dict = (
f'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
f'''{res_name_a:>3} {chain_tag:>1}'''
f'''{residue_index[i]:>4}{insertion_code:>1}   '''
f'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
f'''{occupancy:>6.2f}{b_factor:>6.2f}          '''
f'''{element:>2}{charge:>2}'''
)
pdb_lines.append(UpperCamelCase )
atom_index += 1
lowerCamelCase__ : Union[str, Any] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : List[Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowerCamelCase__ : Dict = """TER"""
lowerCamelCase__ : List[Any] = (
f'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(UpperCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(UpperCamelCase , UpperCamelCase ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , ) -> Protein:
return Protein(
aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=UpperCamelCase , remark=UpperCamelCase , parents=UpperCamelCase , parents_chain_index=UpperCamelCase , )
| 369 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : List[str] ={
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=58_101 , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Any=12 , UpperCamelCase__: Optional[int]=4_096 , UpperCamelCase__: Tuple=16 , UpperCamelCase__: Dict=12 , UpperCamelCase__: Optional[Any]=4_096 , UpperCamelCase__: Any=16 , UpperCamelCase__: List[str]=0.0 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: str=True , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Optional[int]="gelu" , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Optional[int]=0.1 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: Optional[int]=0.02 , UpperCamelCase__: str=58_100 , UpperCamelCase__: Tuple=False , UpperCamelCase__: Optional[Any]=58_100 , UpperCamelCase__: int=0 , UpperCamelCase__: Union[str, Any]=0 , UpperCamelCase__: List[str]=True , **UpperCamelCase__: str , ):
lowerCamelCase__ : int = vocab_size
lowerCamelCase__ : Tuple = decoder_vocab_size or vocab_size
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Optional[Any] = d_model
lowerCamelCase__ : int = encoder_ffn_dim
lowerCamelCase__ : Union[str, Any] = encoder_layers
lowerCamelCase__ : Dict = encoder_attention_heads
lowerCamelCase__ : Optional[int] = decoder_ffn_dim
lowerCamelCase__ : List[str] = decoder_layers
lowerCamelCase__ : Dict = decoder_attention_heads
lowerCamelCase__ : int = dropout
lowerCamelCase__ : str = attention_dropout
lowerCamelCase__ : Dict = activation_dropout
lowerCamelCase__ : List[str] = activation_function
lowerCamelCase__ : Union[str, Any] = init_std
lowerCamelCase__ : str = encoder_layerdrop
lowerCamelCase__ : Any = decoder_layerdrop
lowerCamelCase__ : List[str] = use_cache
lowerCamelCase__ : List[str] = encoder_layers
lowerCamelCase__ : int = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase__ : str = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase_ ( self: Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : List[str] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCamelCase__ : Dict = {0: """batch"""}
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCamelCase__ : Any = {0: """batch""", 1: """decoder_sequence"""}
lowerCamelCase__ : Dict = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(UpperCamelCase__ ):
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCamelCase__ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowerCamelCase__ : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase_ ( self: Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super().outputs
else:
lowerCamelCase__ : Any = super(UpperCamelCase__ , self ).outputs
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers
for i in range(UpperCamelCase__ ):
lowerCamelCase__ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCamelCase_ ( self: str , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
lowerCamelCase__ : Any = seq_length if not self.use_past else 1
lowerCamelCase__ : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : Optional[int] = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = common_inputs["""input_ids"""].shape
lowerCamelCase__ : Tuple = common_inputs["""decoder_input_ids"""].shape[1]
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.num_attention_heads
lowerCamelCase__ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Tuple = decoder_seq_length + 3
lowerCamelCase__ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : Optional[int] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
lowerCamelCase__ : Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ , lowerCamelCase__ : Any = self.num_layers
lowerCamelCase__ : str = min(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
lowerCamelCase__ : int = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
lowerCamelCase__ : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase__ : Optional[Any] = seqlen + 2
lowerCamelCase__ , lowerCamelCase__ : Dict = self.num_layers
lowerCamelCase__ , lowerCamelCase__ : Dict = self.num_attention_heads
lowerCamelCase__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Optional[Any] = common_inputs["""attention_mask"""].dtype
lowerCamelCase__ : int = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
lowerCamelCase__ : int = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase__ : List[Any] = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : Union[str, Any] = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
lowerCamelCase__ : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Union[str, Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : str = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
lowerCamelCase__ : Tuple = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
lowerCamelCase__ : List[Any] = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@property
def lowerCamelCase_ ( self: Union[str, Any] ):
return 1e-4
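# Standalone sketch of the "dynamic axis" trick used by generate_dummy_inputs
# above: when an axis is dynamic (-1), substitute a small fixed size so ONNX
# export cannot constant-fold it away. The helper below only mirrors the
# intent of compute_effective_axis_dimension; the real function may differ in
# details.
def effective_dim(dim: int, fixed: int, tokens_to_add: int = 0) -> int:
    return fixed - tokens_to_add if dim <= 0 else dim

assert effective_dim(-1, fixed=2) == 2
assert effective_dim(16, fixed=2) == 16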
| 129 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
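# Example invocation of the conversion script above (the script filename and
# output path are illustrative):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --dump_path ./unclip-image-variation \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha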
| 157 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Any:
"""simple docstring"""
UpperCamelCase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_pad
def A__ ( self ) -> str:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
"""simple docstring"""
if not batched:
UpperCamelCase = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
UpperCamelCase ,UpperCamelCase = image.size
else:
UpperCamelCase ,UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase = int(self.size["""shortest_edge"""] * h / w )
UpperCamelCase = self.size["""shortest_edge"""]
elif w > h:
UpperCamelCase = self.size["""shortest_edge"""]
UpperCamelCase = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCamelCase = self.size["""shortest_edge"""]
UpperCamelCase = self.size["""shortest_edge"""]
else:
UpperCamelCase = []
for image in image_inputs:
UpperCamelCase ,UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = ConditionalDetrImageProcessingTester(self )
@property
def A__ ( self ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"""image_id""": 39769, """annotations""": target}
# encode them
UpperCamelCase = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )
@slow
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
UpperCamelCase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
UpperCamelCase = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
# verify masks
UpperCamelCase = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )
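# --- Illustration (not part of the test file above): the aspect-preserving
# resize rule that get_expected_values mirrors. The function name and the
# default of 18 are assumptions taken from the tester's size dict.
def expected_resized_size(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


assert expected_resized_size(400, 200) == (36, 18)  # a 2:1 portrait image keeps its ratio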
| 366 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE__ = 'scheduler_config.json'
class a_ ( lowerCamelCase ):
lowercase = 1
lowercase = 2
lowercase = 3
lowercase = 4
lowercase = 5
lowercase = 6
lowercase = 7
lowercase = 8
lowercase = 9
lowercase = 10
lowercase = 11
lowercase = 12
lowercase = 13
lowercase = 14
@dataclass
class a_ ( lowerCamelCase ):
lowercase = 42
class a_ :
lowercase = SCHEDULER_CONFIG_NAME
lowercase = []
lowercase = True
@classmethod
def A__ ( cls , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE , return_commit_hash=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
return cls.from_config(_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
self.save_config(save_directory=_SCREAMING_SNAKE_CASE , push_to_hub=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def A__ ( self ) -> Tuple:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def A__ ( cls ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase = importlib.import_module(__name__.split(""".""" )[0] )
UpperCamelCase = [
getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for c in compatible_classes_str if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
return compatible_classes
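# --- Illustration (not part of the mixin above): the name-to-class resolution
# that the compatibles lookup performs, reduced to a self-contained helper.
# The helper name is hypothetical.
import importlib

def resolve_classes(class_names, package_name):
    module = importlib.import_module(package_name)
    return [getattr(module, name) for name in class_names if hasattr(module, name)]

# e.g. resolve_classes(["OrderedDict", "Counter"], "collections") returns both classes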
| 183 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
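# --- Illustration (standalone sketch of the dummy-object pattern above):
# instantiating the placeholder fails with an actionable ImportError instead
# of an opaque AttributeError later on. `_require` is a stand-in for the
# library's requires_backends helper.
def _require(obj, backends):
    name = obj if isinstance(obj, str) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class FakeMidiProcessor:
    def __init__(self, *args, **kwargs):
        _require(self, ["note_seq"])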
| 161 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
UpperCAmelCase__ : torch.FloatTensor
UpperCAmelCase__ : torch.FloatTensor
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
UpperCAmelCase__ : int = 1
@register_to_config
def __init__( self :Optional[Any] , _A :int = 2_000 , _A :float = 0.15 , _A :float = 0.01 , _A :float = 1_348.0 , _A :float = 1E-5 , _A :int = 1 , ) -> Optional[Any]:
'''simple docstring'''
__A = sigma_max
# setable values
__A = None
self.set_sigmas(_A , _A , _A , _A )
def lowercase_ ( self :Optional[int] , _A :torch.FloatTensor , _A :Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def lowercase_ ( self :Tuple , _A :int , _A :float = None , _A :Union[str, torch.device] = None ) -> Optional[int]:
'''simple docstring'''
__A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
__A = torch.linspace(1 , _A , _A , device=_A )
def lowercase_ ( self :Any , _A :int , _A :float = None , _A :float = None , _A :float = None ) -> Optional[int]:
'''simple docstring'''
__A = sigma_min if sigma_min is not None else self.config.sigma_min
__A = sigma_max if sigma_max is not None else self.config.sigma_max
__A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_A , _A )
__A = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__A = torch.exp(torch.linspace(math.log(_A ) , math.log(_A ) , _A ) )
__A = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase_ ( self :List[Any] , _A :Any , _A :Optional[Any] ) -> str:
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowercase_ ( self :List[str] , _A :torch.FloatTensor , _A :int , _A :torch.FloatTensor , _A :Optional[torch.Generator] = None , _A :bool = True , ) -> Union[SdeVeOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__A = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__A = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__A = timesteps.to(self.discrete_sigmas.device )
__A = self.discrete_sigmas[timesteps].to(sample.device )
__A = self.get_adjacent_sigma(_A , _A ).to(sample.device )
__A = torch.zeros_like(_A )
__A = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__A = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__A = diffusion.unsqueeze(-1 )
__A = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__A = randn_tensor(
sample.shape , layout=sample.layout , generator=_A , device=sample.device , dtype=sample.dtype )
__A = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__A = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_A , prev_sample_mean=_A )
def lowercase_ ( self :str , _A :torch.FloatTensor , _A :torch.FloatTensor , _A :Optional[torch.Generator] = None , _A :bool = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__A = randn_tensor(sample.shape , layout=sample.layout , generator=_A ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__A = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__A = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__A = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__A = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__A = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__A = step_size.unsqueeze(-1 )
__A = sample + step_size * model_output
__A = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_A )
def lowercase_ ( self :Any , _A :torch.FloatTensor , _A :torch.FloatTensor , _A :torch.FloatTensor , ) -> torch.FloatTensor:
'''simple docstring'''
__A = timesteps.to(original_samples.device )
__A = self.discrete_sigmas.to(original_samples.device )[timesteps]
__A = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_A ) * sigmas[:, None, None, None]
)
__A = noise + original_samples
return noisy_samples
def __len__( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.config.num_train_timesteps
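# --- Illustration (not part of the scheduler above): the geometric noise
# schedule that set_sigmas builds, evaluated at the config defaults
# sigma_min=0.01, sigma_max=1348.0. sigma(t) = sigma_min * (sigma_max / sigma_min) ** t,
# so sigma decays from sigma_max at t=1 toward sigma_min as t -> 0.
import torch

sigma_min, sigma_max = 0.01, 1348.0
t = torch.linspace(1, 1e-5, 5)
sigmas = sigma_min * (sigma_max / sigma_min) ** t
# sigmas[0] == 1348.0 and sigmas[-1] is approximately 0.01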
| 161 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __snake_case ( unittest.TestCase , _UpperCamelCase ):
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Tuple = load_tool('text-to-speech')
self.tool.setup()
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__: Optional[int] = self.tool('hey')
a__: Dict = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]) , ))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: str = self.tool('hey')
a__: List[str] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]) , ))
| 368 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self , lowercase , lowercase=12 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=0.02 , lowercase=0 , lowercase=None , ) -> Optional[int]:
'''simple docstring'''
a__: List[Any] = parent
a__: Any = batch_size
a__: int = seq_length
a__: List[str] = is_training
a__: Any = use_input_mask
a__: Optional[Any] = use_labels
a__: List[Any] = vocab_size
a__: Optional[Any] = hidden_size
a__: Any = projection_dim
a__: List[str] = num_hidden_layers
a__: Dict = num_attention_heads
a__: int = intermediate_size
a__: Tuple = dropout
a__: Union[str, Any] = attention_dropout
a__: str = max_position_embeddings
a__: List[str] = initializer_range
a__: Optional[int] = scope
a__: List[Any] = bos_token_id
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: str = None
if self.use_input_mask:
a__: List[str] = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
a__: str = input_mask.numpy()
a__ , a__: Any = input_mask.shape
a__: Tuple = np.random.randint(1 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(lowercase):
a__: Tuple = 1
a__: Any = 0
a__: Any = self.get_config()
return config, input_ids, tf.convert_to_tensor(lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: Any = TFBlipTextModel(config=lowercase)
a__: Union[str, Any] = model(lowercase , attention_mask=lowercase , training=lowercase)
a__: List[Any] = model(lowercase , training=lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Tuple = self.prepare_config_and_inputs()
a__ , a__ , a__: Optional[Any] = config_and_inputs
a__: Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = (TFBlipTextModel,) if is_tf_available() else ()
a__ = False
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[Any] = BlipTextModelTester(self)
a__: int = ConfigTester(self , config_class=lowercase , hidden_size=37)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='Blip does not use inputs_embeds')
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Tuple = TFBlipTextModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def lowerCamelCase_ ( self , lowercase=True) -> str:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=lowercase)
| 203 | 0 |
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a known issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling a digit results in a two-digit number, i.e. greater
        # than 9 (e.g., 6 x 2 = 12), then add the digits of the product
        # (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print a verdict for the number and return the validation result."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
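# --- Worked example (illustration only) of the doubling shortcut used in
# luhn_validation: for a doubled digit above 9, summing its two digits equals
# (digit % 10) + 1, because the tens digit of a doubled digit is always 1.
digit = 7 * 2  # 14
if digit > 9:
    digit %= 10
    digit += 1
assert digit == 5  # same as 1 + 4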
| 241 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : int = 3
lowerCAmelCase_ : Dict = (32, 32)
lowerCAmelCase_ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a_ )
return image
@property
def lowerCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase ( self : Tuple ):
torch.manual_seed(0 )
lowerCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowerCamelCase ( self : List[str] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(a_ )
@property
def lowerCamelCase ( self : Union[str, Any] ):
def extract(*a_ : Tuple , **a_ : Tuple ):
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = torch.ones([0] )
def lowerCamelCase ( self : str , a_ : Optional[int] ):
self.pixel_values.to(a_ )
return self
return Out()
return extract
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : List[Any] = self.dummy_cond_unet
lowerCAmelCase_ : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCAmelCase_ : List[Any] = self.dummy_vae
lowerCAmelCase_ : List[str] = self.dummy_text_encoder
lowerCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : Optional[Any] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : str = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : Any = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ : str = output.images
lowerCAmelCase_ : Dict = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : str = sd_pipe(
[prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0]
lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
lowerCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Any = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Union[str, Any] = self.dummy_cond_unet
lowerCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=a_ )
lowerCAmelCase_ : List[Any] = self.dummy_vae
lowerCAmelCase_ : List[str] = self.dummy_text_encoder
lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : List[str] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Optional[Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Any = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Optional[int] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0]
lowerCAmelCase_ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=a_ )
assert isinstance(a_ , a_ )
assert isinstance(pipe.scheduler , a_ )
assert pipe.safety_checker is None
lowerCAmelCase_ : str = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
lowerCAmelCase_ : List[str] = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase_ : Any = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : str = self.dummy_cond_unet
lowerCAmelCase_ : str = PNDMScheduler(skip_prk_steps=a_ )
lowerCAmelCase_ : Tuple = self.dummy_vae
lowerCAmelCase_ : Dict = self.dummy_text_encoder
lowerCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
lowerCAmelCase_ : int = unet.half()
lowerCAmelCase_ : Dict = vae.half()
lowerCAmelCase_ : List[Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : Optional[int] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : List[str] = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ )
lowerCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : List[str] = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
lowerCAmelCase_ : Optional[int] = 40_03_66_03_46
lowerCAmelCase_ : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(a_ )
lowerCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Union[str, Any] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
lowerCAmelCase_ : List[str] = torch.manual_seed(a_ )
lowerCAmelCase_ : Any = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Optional[Any] = output.images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Optional[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ )
lowerCAmelCase_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Union[str, Any] = "padme amidala taking a bath artwork, safe for work, no nudity"
lowerCAmelCase_ : Union[str, Any] = 27_34_97_17_55
lowerCAmelCase_ : Union[str, Any] = 7
lowerCAmelCase_ : str = torch.manual_seed(a_ )
lowerCAmelCase_ : Dict = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : Any = output.images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
lowerCAmelCase_ : int = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCAmelCase_ : Optional[int] = torch.manual_seed(a_ )
lowerCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Any = output.images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Tuple = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
lowerCAmelCase_ : Any = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Tuple = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
lowerCAmelCase_ : List[Any] = 10_44_35_52_34
lowerCAmelCase_ : Dict = 12
lowerCAmelCase_ : int = torch.manual_seed(a_ )
lowerCAmelCase_ : List[str] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : int = output.images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
lowerCAmelCase_ : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCAmelCase_ : int = torch.manual_seed(a_ )
lowerCAmelCase_ : Any = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Optional[Any] = output.images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : str = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 241 | 1 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the tetration base^^height."""
    # repeatedly exponentiate, keeping only the trailing digits at each step
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
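# --- Sanity check (illustration only): _modexpt agrees with Python's
# built-in three-argument pow on a few cases.
for b, e, m in [(3, 13, 1000), (2, 10, 7), (1777, 1855, 10**8)]:
    assert _modexpt(b, e, m) == pow(b, e, m)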
| 165 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(
__lowerCAmelCase , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : GenericTensor ):
"""simple docstring"""
if self.framework == "tf":
UpperCamelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCamelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCamelCase_ )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : GenericTensor ):
"""simple docstring"""
UpperCamelCase = self.get_masked_index(lowerCamelCase_ )
UpperCamelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : GenericTensor ):
"""simple docstring"""
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any]=None , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
if return_tensors is None:
UpperCamelCase = self.framework
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.ensure_exactly_one_mask_token(lowerCamelCase_ )
return model_inputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = self.model(**lowerCamelCase_ )
UpperCamelCase = model_inputs["""input_ids"""]
return model_outputs
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Dict , lowerCamelCase_ : str=5 , lowerCamelCase_ : Optional[int]=None ):
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCamelCase = target_ids.shape[0]
UpperCamelCase = model_outputs["""input_ids"""][0]
UpperCamelCase = model_outputs["""logits"""]
if self.framework == "tf":
UpperCamelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCamelCase = outputs.numpy()
UpperCamelCase = outputs[0, masked_index, :]
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1 )
if target_ids is not None:
UpperCamelCase = tf.gather_nd(tf.squeeze(lowerCamelCase_ , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCamelCase = tf.expand_dims(lowerCamelCase_ , 0 )
UpperCamelCase = tf.math.top_k(lowerCamelCase_ , k=lowerCamelCase_ )
UpperCamelCase , UpperCamelCase = topk.values.numpy(), topk.indices.numpy()
else:
UpperCamelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCamelCase_ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCamelCase = outputs[0, masked_index, :]
UpperCamelCase = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCamelCase = probs[..., target_ids]
UpperCamelCase , UpperCamelCase = probs.topk(lowerCamelCase_ )
UpperCamelCase = []
UpperCamelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCamelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCamelCase = input_ids.numpy().copy()
if target_ids is not None:
UpperCamelCase = target_ids[p].tolist()
UpperCamelCase = p
# Filter padding out:
UpperCamelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCamelCase = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
UpperCamelCase = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(lowerCamelCase_ )
result.append(lowerCamelCase_ )
if single_mask:
return result[0]
return result
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any]=None ):
"""simple docstring"""
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCamelCase = [targets]
try:
UpperCamelCase = self.tokenizer.get_vocab()
except Exception:
UpperCamelCase = {}
UpperCamelCase = []
for target in targets:
UpperCamelCase = vocab.get(lowerCamelCase_ , lowerCamelCase_ )
if id_ is None:
UpperCamelCase = self.tokenizer(
lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , max_length=1 , truncation=lowerCamelCase_ , )["""input_ids"""]
if len(lowerCamelCase_ ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
UpperCamelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCamelCase = list(set(lowerCamelCase_ ) )
if len(lowerCamelCase_ ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
UpperCamelCase = np.array(lowerCamelCase_ )
return target_ids
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Any=None ):
"""simple docstring"""
UpperCamelCase = {}
if targets is not None:
UpperCamelCase = self.get_target_ids(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = target_ids
if top_k is not None:
UpperCamelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self : Tuple , lowerCamelCase_ : str , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[Any] ):
"""simple docstring"""
UpperCamelCase = super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) == 1:
return outputs[0]
return outputs
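# --- Usage sketch (the checkpoint name is an example, not mandated by the
# code above): each prediction dict carries score, token, token_str, sequence.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilroberta-base")
for pred in fill_mask("The capital of France is <mask>.", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))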
| 165 | 1 |
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float
) -> np.ndarray:
    """Integrate y' = ode_func(x, y) from (xa, ya) to x_end with Heun's method."""
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # predictor: one explicit Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
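# --- Usage sketch (illustration only): integrate y' = y from x = 0 to 1 with
# y(0) = 1; Heun's method lands close to the exact answer e.
import numpy as np

ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(ys[-1])  # ~2.7182, versus np.e ~ 2.71828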
| 12 |
"""simple docstring"""
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
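# --- Usage sketch for these hub entry points. The repo slug below is the
# historical torch.hub name for this project; treat it as an assumption if
# your checkout lives elsewhere.
import torch

tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
mdl = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")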
| 132 | 0 |
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points given as sequences of floats."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
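# --- Tiny check (illustration only): the 3-4-5 right triangle, so the
# distance helper returns exactly 5.0.
assert euclidean_distance([0, 0], [3, 4]) == 5.0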
| 364 |
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel

KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # NOTE: "audio" is always truthy here, so this effectively tests only
        # whether "qkv" is in the key (kept as in the source conversion script)
        if "audio" and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
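# --- Illustration (standalone, with a hypothetical extra key name) of why
# strict=False matters above: keys present in the source state dict but
# absent from the target model are reported as unexpected instead of raising.
import torch
import torch.nn as nn

layer = nn.Linear(2, 2)
state = {**layer.state_dict(), "spectrogram_embed.weight": torch.zeros(1)}
result = layer.load_state_dict(state, strict=False)
print(result.unexpected_keys)  # ['spectrogram_embed.weight']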
| 20 | 0 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a Parquet row-group size suited to the dataset's feature types."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to Parquet; returns the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 153 |
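For orientation, the reader/writer pair above is what backs the public parquet round-trip API in datasets. A short sketch, assuming the datasets library is installed:

from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
ds.to_parquet("toy.parquet")  # uses the writer above; row group size comes from get_writer_batch_size
reloaded = load_dataset("parquet", data_files="toy.parquet", split="train")
print(len(reloaded), reloaded.features)  # 3 rows, same schema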
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet'''] = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlnet'''] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlnet'''] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 188 | 0 |
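Stripped of the XLNet specifics, the machinery above boils down to deferring submodule imports until an exported name is first touched. A self-contained sketch of that idea; this is an illustrative re-implementation, not the actual transformers._LazyModule:

import importlib
import types

class LazyModule(types.ModuleType):
    # maps each exported symbol to the submodule that defines it,
    # and imports that submodule only on first attribute access
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)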
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
lowercase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase ( self : Dict , snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : str ):
snake_case__ : List[Any] = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
snake_case__ : str = VideoClassificationPipeline(model=snake_case_ , image_processor=snake_case_ , top_k=2 )
snake_case__ : Optional[Any] = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def lowerCamelCase ( self : List[str] , snake_case_ : Any , snake_case_ : List[str] ):
for example in examples:
snake_case__ : str = video_classifier(snake_case_ )
self.assertEqual(
snake_case_ , [
{"""score""": ANY(snake_case_ ), """label""": ANY(snake_case_ )},
{"""score""": ANY(snake_case_ ), """label""": ANY(snake_case_ )},
] , )
@require_torch
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Tuple = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
snake_case__ : Dict = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
snake_case__ : int = pipeline(
"""video-classification""" , model=snake_case_ , feature_extractor=snake_case_ , frame_sampling_rate=4 )
snake_case__ : Any = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
snake_case__ : List[str] = video_classifier(snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , )
snake_case__ : Union[str, Any] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
] , )
@require_tf
def lowerCamelCase ( self : Union[str, Any] ):
pass
| 356 |
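Outside the test harness, the pipeline under test is used roughly as follows. The checkpoint name is illustrative (any video-classification model id should work), and decord plus a torch or TF backend must be installed:

from transformers import pipeline

# assumed public checkpoint; swap in any video-classification model id
classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
predictions = classifier("archery.mp4", top_k=2)  # local path or URL to a video
for pred in predictions:
    print(f"{pred['label']}: {pred['score']:.4f}")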
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Dict ):
snake_case__ : List[str] = {}
def lowerCamelCase ( self : List[Any] , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : Tuple=1 ):
if self.graph.get(snake_case_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case__ : Tuple = [[w, v]]
if not self.graph.get(snake_case_ ):
snake_case__ : Optional[Any] = []
def lowerCamelCase ( self : List[str] ):
return list(self.graph )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Dict ):
if self.graph.get(snake_case_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(snake_case_ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : Tuple=-2 , snake_case_ : Tuple=-1 ):
if s == d:
return []
snake_case__ : Optional[Any] = []
snake_case__ : List[Any] = []
if s == -2:
snake_case__ : Union[str, Any] = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
snake_case__ : Optional[int] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(snake_case_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(snake_case_ ) != 0:
snake_case__ : Tuple = stack[len(snake_case_ ) - 1]
else:
snake_case__ : Tuple = ss
            # check if we have reached the starting point
if len(snake_case_ ) == 0:
return visited
def lowerCamelCase ( self : Optional[Any] , snake_case_ : Any=-1 ):
if c == -1:
snake_case__ : Union[str, Any] = floor(random() * 10_000 ) + 10
for i in range(snake_case_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case__ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(snake_case_ , snake_case_ , 1 )
def lowerCamelCase ( self : List[Any] , snake_case_ : str=-2 ):
snake_case__ : Tuple = deque()
snake_case__ : str = []
if s == -2:
snake_case__ : str = list(self.graph )[0]
d.append(snake_case_ )
visited.append(snake_case_ )
while d:
snake_case__ : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Optional[Any] ):
snake_case__ : Optional[int] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCamelCase ( self : Optional[Any] , snake_case_ : Any ):
return len(self.graph[u] )
def lowerCamelCase ( self : List[str] , snake_case_ : Union[str, Any]=-2 ):
snake_case__ : str = []
snake_case__ : Any = []
if s == -2:
snake_case__ : Any = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
snake_case__ : Dict = s
snake_case__ : List[Any] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(snake_case_ ) != 0:
snake_case__ : Optional[int] = stack[len(snake_case_ ) - 1]
else:
snake_case__ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(snake_case_ ) == 0:
return sorted_nodes
def lowerCamelCase ( self : int ):
snake_case__ : List[str] = []
snake_case__ : Union[str, Any] = []
snake_case__ : Optional[int] = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
snake_case__ : List[Any] = -2
snake_case__ : Union[str, Any] = []
snake_case__ : Optional[Any] = s
snake_case__ : Optional[Any] = False
snake_case__ : Any = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case__ : str = len(snake_case_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case__ : List[str] = True
if len(snake_case_ ) != 0:
snake_case__ : Any = stack[len(snake_case_ ) - 1]
else:
snake_case__ : Optional[Any] = False
indirect_parents.append(snake_case_ )
snake_case__ : Union[str, Any] = s
snake_case__ : str = ss
            # check if we have reached the starting point
if len(snake_case_ ) == 0:
return list(snake_case_ )
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : List[str] = []
snake_case__ : str = []
snake_case__ : Tuple = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
snake_case__ : Optional[int] = -2
snake_case__ : List[str] = []
snake_case__ : Optional[int] = s
snake_case__ : str = False
snake_case__ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case__ : Optional[Any] = len(snake_case_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case__ : List[str] = True
if len(snake_case_ ) != 0:
snake_case__ : List[str] = stack[len(snake_case_ ) - 1]
else:
snake_case__ : int = False
indirect_parents.append(snake_case_ )
snake_case__ : Any = s
snake_case__ : Tuple = ss
            # check if we have reached the starting point
if len(snake_case_ ) == 0:
return False
def lowerCamelCase ( self : int , snake_case_ : List[Any]=-2 , snake_case_ : List[str]=-1 ):
snake_case__ : List[Any] = time()
self.dfs(snake_case_ , snake_case_ )
snake_case__ : Optional[Any] = time()
return end - begin
def lowerCamelCase ( self : int , snake_case_ : List[str]=-2 ):
snake_case__ : Any = time()
self.bfs(snake_case_ )
snake_case__ : List[str] = time()
return end - begin
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[str] ):
snake_case__ : List[str] = {}
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : int , snake_case_ : Union[str, Any]=1 ):
        # check if vertex u exists
if self.graph.get(snake_case_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case__ : Dict = [[w, v]]
# add the other way
if self.graph.get(snake_case_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
snake_case__ : Any = [[w, u]]
def lowerCamelCase ( self : int , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
if self.graph.get(snake_case_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(snake_case_ )
# the other way round
if self.graph.get(snake_case_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(snake_case_ )
def lowerCamelCase ( self : Any , snake_case_ : Tuple=-2 , snake_case_ : Union[str, Any]=-1 ):
if s == d:
return []
snake_case__ : Dict = []
snake_case__ : Optional[int] = []
if s == -2:
snake_case__ : Any = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
snake_case__ : Optional[int] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(snake_case_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(snake_case_ ) != 0:
snake_case__ : str = stack[len(snake_case_ ) - 1]
else:
snake_case__ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(snake_case_ ) == 0:
return visited
def lowerCamelCase ( self : List[str] , snake_case_ : str=-1 ):
if c == -1:
snake_case__ : Union[str, Any] = floor(random() * 10_000 ) + 10
for i in range(snake_case_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case__ : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(snake_case_ , snake_case_ , 1 )
def lowerCamelCase ( self : str , snake_case_ : Dict=-2 ):
snake_case__ : Union[str, Any] = deque()
snake_case__ : Optional[int] = []
if s == -2:
snake_case__ : Tuple = list(self.graph )[0]
d.append(snake_case_ )
visited.append(snake_case_ )
while d:
snake_case__ : str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase ( self : Any , snake_case_ : Union[str, Any] ):
return len(self.graph[u] )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : str = []
snake_case__ : List[str] = []
snake_case__ : str = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
snake_case__ : Tuple = -2
snake_case__ : Optional[int] = []
snake_case__ : str = s
snake_case__ : int = False
snake_case__ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case__ : Tuple = len(snake_case_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case__ : Optional[Any] = True
if len(snake_case_ ) != 0:
snake_case__ : Dict = stack[len(snake_case_ ) - 1]
else:
snake_case__ : int = False
indirect_parents.append(snake_case_ )
snake_case__ : int = s
snake_case__ : Tuple = ss
            # check if we have reached the starting point
if len(snake_case_ ) == 0:
return list(snake_case_ )
def lowerCamelCase ( self : str ):
snake_case__ : Tuple = []
snake_case__ : Tuple = []
snake_case__ : Any = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
snake_case__ : List[Any] = -2
snake_case__ : Dict = []
snake_case__ : str = s
snake_case__ : Optional[Any] = False
snake_case__ : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case__ : Optional[int] = len(snake_case_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case__ : Any = True
if len(snake_case_ ) != 0:
snake_case__ : Any = stack[len(snake_case_ ) - 1]
else:
snake_case__ : Tuple = False
indirect_parents.append(snake_case_ )
snake_case__ : Optional[int] = s
snake_case__ : List[Any] = ss
            # check if we have reached the starting point
if len(snake_case_ ) == 0:
return False
def lowerCamelCase ( self : Union[str, Any] ):
return list(self.graph )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : int=-2 , snake_case_ : Any=-1 ):
snake_case__ : int = time()
self.dfs(snake_case_ , snake_case_ )
snake_case__ : List[str] = time()
return end - begin
def lowerCamelCase ( self : List[Any] , snake_case_ : Union[str, Any]=-2 ):
snake_case__ : Optional[int] = time()
self.bfs(snake_case_ )
snake_case__ : str = time()
return end - begin
| 43 | 0 |
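A usage sketch for the two graph classes above. The dump obfuscates the class and most method names; the assumption here, flagged explicitly, is that they map back to TheAlgorithms' directed/undirected weighted graph API (add_pair, dfs, bfs, all_nodes), with edges stored as [weight, vertex] pairs in an adjacency dict:

# assumed names: DirectedGraph for the first class above, with TheAlgorithms' method names
g = DirectedGraph()
g.add_pair(0, 1)   # edge 0 -> 1 with default weight 1
g.add_pair(1, 2)
g.add_pair(2, 0)   # closes the cycle 0 -> 1 -> 2 -> 0
print(g.all_nodes())   # [0, 1, 2]
print(g.dfs(0, 2))     # DFS path from 0 to 2, e.g. [0, 1, 2]
print(g.bfs(0))        # BFS visit order starting at 0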
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def a__ ( self , _a , _a , _a ) -> Any:
_A : List[Any] = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
_A : Tuple = VideoClassificationPipeline(model=_a , image_processor=_a , top_k=2 )
_A : Optional[Any] = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def a__ ( self , _a , _a ) -> Any:
for example in examples:
_A : Optional[int] = video_classifier(_a )
self.assertEqual(
_a , [
{"""score""": ANY(_a ), """label""": ANY(_a )},
{"""score""": ANY(_a ), """label""": ANY(_a )},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Tuple = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
_A : Tuple = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
_A : Any = pipeline(
"""video-classification""" , model=_a , feature_extractor=_a , frame_sampling_rate=4 )
_A : Optional[int] = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
_A : Any = video_classifier(_a , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , )
_A : Optional[int] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
] , )
@require_tf
def a__ ( self ) -> str:
pass
| 26 |
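Both test classes assert against ANY(...) placeholders. A minimal sketch of how such a type-wildcard matcher works (mirroring, not importing, the helper from test_pipelines_common):

class ANY:
    # matches any object of the given type(s) under ==
    def __init__(self, *types):
        self.types = types

    def __eq__(self, other):
        return isinstance(other, self.types)

# dict equality compares values with ==, so the wildcard slots in directly
assert {"score": 0.7, "label": "archery"} == {"score": ANY(float), "label": ANY(str)}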
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase : int =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase : List[str] ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 223 | 0 |
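These mappings are meant to be substituted into documentation code templates before rendering. A minimal sketch of that substitution with plain str.replace; the template string here is made up for illustration:

template = "processor = {processor_class}.from_pretrained('x')"
replacements = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
for placeholder, value in replacements.items():
    template = template.replace(placeholder, value)
print(template)  # processor = FakeProcessorClass.from_pretrained('x')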
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for sample x[n]."""
        return 0.0


def get_bounds(fft_results, samplerate):
    """Return display bounds for the magnitude response."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type, samplerate):
    """Plot the magnitude response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("""Gain (dB)""")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type, samplerate):
    """Plot the phase response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("""Phase shift (Radians)""")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
| 363 |
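A usage sketch for the two plotting helpers above: a pass-through filter trivially satisfies the FilterType protocol and should plot a flat 0 dB magnitude and zero phase, which makes it a quick sanity check.

class PassThrough:
    # y[n] = x[n], so the impulse response is [1, 0, 0, ...]
    def process(self, sample: float) -> float:
        return sample

samplerate = 48_000
show_frequency_response(PassThrough(), samplerate)  # flat line at 0 dB
show_phase_response(PassThrough(), samplerate)      # flat line at 0 radians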
'''simple docstring'''
__UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Dict = input("""Enter message: """ )
__snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ )
__snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__snake_case : Any = """encrypt"""
__snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase )
elif mode.lower().startswith("""d""" ):
__snake_case : Optional[int] = """decrypt"""
__snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase )
print(F'''\n{mode.title()}ed message:''' )
print(_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = []
__snake_case : Dict = 0
__snake_case : Optional[int] = key.upper()
for symbol in message:
__snake_case : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowerCamelCase ):
__snake_case : Tuple = 0
else:
translated.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
main()
| 13 | 0 |
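A quick round trip through the functions above, using the textbook example: with key LEMON, ATTACKATDAWN encrypts to LXFOPVEFRNHR.

ciphertext = encrypt_message("LEMON", "ATTACKATDAWN")
print(ciphertext)                             # LXFOPVEFRNHR
print(decrypt_message("LEMON", ciphertext))   # ATTACKATDAWN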
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class _lowerCamelCase ( UpperCAmelCase_ ):
UpperCAmelCase_ = 'instructblip_vision_model'
def __init__(self , __a=14_08 , __a=61_44 , __a=39 , __a=16 , __a=2_24 , __a=14 , __a="gelu" , __a=1e-6 , __a=0.0 , __a=1e-1_0 , __a=True , **__a , ) -> Dict:
super().__init__(**__UpperCAmelCase )
UpperCamelCase = hidden_size
UpperCamelCase = intermediate_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = patch_size
UpperCamelCase = image_size
UpperCamelCase = initializer_range
UpperCamelCase = attention_dropout
UpperCamelCase = layer_norm_eps
UpperCamelCase = hidden_act
UpperCamelCase = qkv_bias
@classmethod
def snake_case_ (cls , __a , **__a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase )
UpperCamelCase , UpperCamelCase = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCamelCase = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class _lowerCamelCase ( UpperCAmelCase_ ):
UpperCAmelCase_ = 'instructblip_qformer'
def __init__(self , __a=3_05_22 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=0.02 , __a=1e-1_2 , __a=0 , __a="absolute" , __a=2 , __a=14_08 , **__a , ) -> Dict:
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = cross_attention_frequency
UpperCamelCase = encoder_hidden_size
@classmethod
def snake_case_ (cls , __a , **__a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase )
UpperCamelCase , UpperCamelCase = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCamelCase = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class _lowerCamelCase ( UpperCAmelCase_ ):
UpperCAmelCase_ = 'instructblip'
UpperCAmelCase_ = True
def __init__(self , __a=None , __a=None , __a=None , __a=32 , **__a ) -> Optional[int]:
super().__init__(**__UpperCAmelCase )
if vision_config is None:
UpperCamelCase = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
UpperCamelCase = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
UpperCamelCase = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
UpperCamelCase = InstructBlipVisionConfig(**__UpperCAmelCase )
UpperCamelCase = InstructBlipQFormerConfig(**__UpperCAmelCase )
UpperCamelCase = text_config["model_type"] if "model_type" in text_config else "opt"
UpperCamelCase = CONFIG_MAPPING[text_model_type](**__UpperCAmelCase )
UpperCamelCase = self.text_config.tie_word_embeddings
UpperCamelCase = self.text_config.is_encoder_decoder
UpperCamelCase = num_query_tokens
UpperCamelCase = self.vision_config.hidden_size
UpperCamelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCamelCase = 1.0
UpperCamelCase = 0.02
@classmethod
def snake_case_ (cls , __a , __a , __a , **__a , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__UpperCAmelCase , )
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = copy.deepcopy(self.__dict__ )
UpperCamelCase = self.vision_config.to_dict()
UpperCamelCase = self.qformer_config.to_dict()
UpperCamelCase = self.text_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
| 153 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is a mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 65 | 0 |
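The make_linear_from_emb helper above exists to tie the decoder embedding matrix to the LM head. The idea in isolation, assuming only torch; shapes are toy values:

import torch
from torch import nn

emb = nn.Embedding(10, 4)               # vocab_size=10, hidden_size=4
lm_head = nn.Linear(4, 10, bias=False)  # hidden -> vocab logits, weight shape (10, 4)
lm_head.weight = emb.weight             # tie: both layers now share one parameter
x = torch.randn(2, 4)
print(lm_head(x).shape)                                     # torch.Size([2, 10])
print(lm_head.weight.data_ptr() == emb.weight.data_ptr())   # True, shared storage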
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _SCREAMING_SNAKE_CASE( A ):
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,**SCREAMING_SNAKE_CASE__ ,) -> int:
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE__ ,split=SCREAMING_SNAKE_CASE__ ,features=SCREAMING_SNAKE_CASE__ ,cache_dir=SCREAMING_SNAKE_CASE__ ,keep_in_memory=SCREAMING_SNAKE_CASE__ ,streaming=SCREAMING_SNAKE_CASE__ ,num_proc=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
__SCREAMING_SNAKE_CASE :int = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__SCREAMING_SNAKE_CASE :Optional[Any] = Text(
cache_dir=SCREAMING_SNAKE_CASE__ ,data_files=SCREAMING_SNAKE_CASE__ ,features=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
if self.streaming:
__SCREAMING_SNAKE_CASE :List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = None
__SCREAMING_SNAKE_CASE :str = None
__SCREAMING_SNAKE_CASE :Any = None
__SCREAMING_SNAKE_CASE :str = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ ,download_mode=SCREAMING_SNAKE_CASE__ ,verification_mode=SCREAMING_SNAKE_CASE__ ,base_path=SCREAMING_SNAKE_CASE__ ,num_proc=self.num_proc ,)
__SCREAMING_SNAKE_CASE :Dict = self.builder.as_dataset(
split=self.split ,verification_mode=SCREAMING_SNAKE_CASE__ ,in_memory=self.keep_in_memory )
        return dataset
| 239 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : List[Any] = '''levit'''
def __init__( self ,SCREAMING_SNAKE_CASE__=2_24 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=[1_28, 2_56, 3_84] ,SCREAMING_SNAKE_CASE__=[4, 8, 12] ,SCREAMING_SNAKE_CASE__=[4, 4, 4] ,SCREAMING_SNAKE_CASE__=[16, 16, 16] ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=[2, 2, 2] ,SCREAMING_SNAKE_CASE__=[2, 2, 2] ,SCREAMING_SNAKE_CASE__=0.0_2 ,**SCREAMING_SNAKE_CASE__ ,) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = image_size
__SCREAMING_SNAKE_CASE :Dict = num_channels
__SCREAMING_SNAKE_CASE :Optional[int] = kernel_size
__SCREAMING_SNAKE_CASE :Union[str, Any] = stride
__SCREAMING_SNAKE_CASE :List[Any] = padding
__SCREAMING_SNAKE_CASE :Tuple = hidden_sizes
__SCREAMING_SNAKE_CASE :List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE :Optional[int] = depths
__SCREAMING_SNAKE_CASE :Optional[Any] = key_dim
__SCREAMING_SNAKE_CASE :Optional[Any] = drop_path_rate
__SCREAMING_SNAKE_CASE :Tuple = patch_size
__SCREAMING_SNAKE_CASE :int = attention_ratio
__SCREAMING_SNAKE_CASE :List[Any] = mlp_ratio
__SCREAMING_SNAKE_CASE :str = initializer_range
__SCREAMING_SNAKE_CASE :Optional[Any] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : int = version.parse('''1.11''' )
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _UpperCamelCase ( self ) -> float:
"""simple docstring"""
        return 1E-4
| 239 | 1 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """simple docstring"""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_00)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = '''will be''' if year > datetime.now().year else '''was'''
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 219 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Dict = '''Hello, World!'''
__lowerCamelCase : Optional[Any] = '''en_XX'''
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : bool ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Path("""data_bin""" )
SCREAMING_SNAKE_CASE__ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__UpperCamelCase ).parent ) , checkpoint_file=Path(__UpperCamelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(__UpperCamelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(__UpperCamelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = XmodForSequenceClassification(__UpperCamelCase ) if classification_head else XmodForMaskedLM(__UpperCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE__ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
SCREAMING_SNAKE_CASE__ = xmod_layer.adapter_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
SCREAMING_SNAKE_CASE__ = bert_output.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE__ = xmod_layer.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE__ = from_adapter.fca.weight
SCREAMING_SNAKE_CASE__ = from_adapter.fca.bias
SCREAMING_SNAKE_CASE__ = from_adapter.fca.weight
SCREAMING_SNAKE_CASE__ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ = xmod.encode(__UpperCamelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCamelCase )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""](xmod.extract_features(__UpperCamelCase ) )
else:
SCREAMING_SNAKE_CASE__ = xmod.model(__UpperCamelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ = torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(__UpperCamelCase ).mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 219 | 1 |
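The converter's final verification above (max absolute difference plus torch.allclose on two forward passes) is a pattern worth reusing for any weight port. A minimal sketch:

import torch

def check_port(our_output: torch.Tensor, their_output: torch.Tensor, atol: float = 1e-3) -> None:
    # compare the two models' outputs elementwise, as the converter does
    max_abs_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_abs_diff}")
    if not torch.allclose(our_output, their_output, atol=atol):
        raise Exception("Something went wRoNg")

check_port(torch.ones(2, 3), torch.ones(2, 3) + 1e-5)  # passes: diff well under atol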
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """simple docstring"""
    job_info = {}

    start = job['started_at']
    end = job['completed_at']

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 1_0_0) / 1_0_0)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"""&page={i + 2}""", headers=headers).json()
            job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        return job_time
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
| 149 | """simple docstring"""
import requests
from bs4 import BeautifulSoup
def lowerCAmelCase__ ( _UpperCamelCase : str = "https://www.worldometers.info/coronavirus" ) -> dict:
"""simple docstring"""
snake_case = BeautifulSoup(requests.get(_UpperCamelCase ).text , 'html.parser' )
snake_case = soup.findAll('h1' )
snake_case = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(_UpperCamelCase , _UpperCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""")
| 149 | 1 |
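The scraper relies on positional pairing of heading and counter tags. The same zip-based pairing on static HTML, with no network access (bs4 assumed installed):

from bs4 import BeautifulSoup

html = "<h1>Cases</h1><div class='n'>1,000</div><h1>Deaths</h1><div class='n'>10</div>"
soup = BeautifulSoup(html, "html.parser")
keys = soup.findAll("h1")
values = soup.findAll("div", {"class": "n"})
print({k.text.strip(): v.text.strip() for k, v in zip(keys, values)})
# {'Cases': '1,000', 'Deaths': '10'}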
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''speech_to_text'''
_lowerCamelCase: Union[str, Any] = ['''past_key_values''']
_lowerCamelCase: Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Union[str, Any] ,A_ : Tuple=1_0000 ,A_ : Union[str, Any]=12 ,A_ : int=2048 ,A_ : List[Any]=4 ,A_ : int=6 ,A_ : List[Any]=2048 ,A_ : List[str]=4 ,A_ : Optional[Any]=0.0 ,A_ : str=0.0 ,A_ : Dict=True ,A_ : List[Any]=True ,A_ : int="relu" ,A_ : List[str]=256 ,A_ : Dict=0.1 ,A_ : List[Any]=0.0 ,A_ : List[Any]=0.0 ,A_ : Union[str, Any]=0.02 ,A_ : str=2 ,A_ : Union[str, Any]=True ,A_ : List[str]=1 ,A_ : Union[str, Any]=0 ,A_ : int=2 ,A_ : int=6000 ,A_ : Dict=1024 ,A_ : str=2 ,A_ : Tuple=(5, 5) ,A_ : List[str]=1024 ,A_ : str=80 ,A_ : int=1 ,**A_ : Union[str, Any] ,) -> Optional[Any]:
A = vocab_size
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = max_source_positions
A = max_target_positions
A = num_conv_layers
A = list(A_ )
A = conv_channels
A = input_feat_per_channel
A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '
F'`config.num_conv_layers = {self.num_conv_layers}`.' )
super().__init__(
            pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,is_encoder_decoder=A_ ,decoder_start_token_id=A_ ,**A_ ,)
| 74 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
A = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = 'sgugger/tiny-distilbert-classification'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
# set architectures equal to `None`
A = None
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(A_ : Optional[int] ):
self.assertTrue(hasattr(A_ ,'sequential' ) )
self.assertTrue(hasattr(A_ ,'cumulative' ) )
self.assertTrue(hasattr(A_ ,'current' ) )
self.assertTrue(hasattr(A_ ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(A_ ,'log.txt' ) ).exists() )
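# --- Usage sketch (added) -----------------------------------------------------
# A stand-alone illustration of the API the tests above exercise. The boolean
# flags are assumptions: the obfuscated `A_` placeholders hide the real values.
if __name__ == "__main__":
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    benchmark_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = PyTorchBenchmark(benchmark_args).run()
    print(results.time_inference_result)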
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class DetaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''deta'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
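# --- Usage sketch (added) -----------------------------------------------------
# Demonstrates the attribute_map above: `hidden_size` and `num_attention_heads`
# resolve to `d_model` / `encoder_attention_heads`. Because this module uses
# relative imports, the demo imports the installed library instead.
if __name__ == "__main__":
    from transformers import DetaConfig as InstalledDetaConfig

    config = InstalledDetaConfig(num_queries=300)
    print(config.num_queries, config.hidden_size, config.num_attention_heads)  # 300 256 8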
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """simple docstring"""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


# The factory name below follows the upstream `transformers` implementation
# this file mirrors; the original (obfuscated) name was lost.
def create_optimizer(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """simple docstring"""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        # Creates an optimizer from its config with the WarmUp custom object.
        custom_objects = {"WarmUp": WarmUp}
        return super().from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super()._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super().apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super()._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super()._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
# The class name follows the upstream `transformers` implementation this
# snippet mirrors; nothing in this file references it by name.
class GradientAccumulator:
    """simple docstring"""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
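# --- Usage sketch (added; hyperparameter values are illustrative) --------------
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5,
        num_train_steps=1_000,
        num_warmup_steps=100,
        weight_decay_rate=0.01,
    )
    # Halfway through warmup the LR has ramped to half of init_lr.
    print(float(lr_schedule(50)))  # ~2.5e-05

    # The accumulator sums gradients across calls until reset() is called.
    accumulator = GradientAccumulator()
    accumulator([tf.constant([1.0, 2.0])])
    accumulator([tf.constant([3.0, 4.0])])
    print(int(accumulator.step), accumulator.gradients[0].numpy())  # 2 [4. 6.]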
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    '''Write the first `n` lines of each file f in src_dir to dest_dir/f.'''
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
    fire.Fire(minify)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''LayoutLMv3FeatureExtractor''']
__A = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
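# --- Usage note (added sketch) --------------------------------------------------
# `_LazyModule` defers every heavy import above: nothing is actually imported
# until an attribute is first accessed. The demo assumes an installed
# `transformers`; this file itself only registers the lazy module.
if __name__ == "__main__":
    from transformers.models.layoutlmv3 import LayoutLMv3Config  # triggers the real import

    print(LayoutLMv3Config().model_type)  # -> "layoutlmv3"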
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowercase__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
lowercase__ = cvtColor(img, COLOR_BGR2GRAY)
def _snake_case ( ):
_lowerCamelCase : List[str] = cn.convert_to_negative(lowercase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def _snake_case ( ):
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase__ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _snake_case ( ):
_lowerCamelCase : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _snake_case ( ):
_lowerCamelCase : int = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_lowerCamelCase : List[Any] = canny.canny(lowercase__ )
# assert canny array for at least one True
assert canny_array.any()
def _snake_case ( ):
assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all()
def _snake_case ( ):
# laplace diagonals
_lowerCamelCase : Any = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
_lowerCamelCase : int = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ )
assert res.any()
def _snake_case ( ):
assert med.median_filter(lowercase__ , 3 ).any()
def _snake_case ( ):
_lowerCamelCase : List[Any] = sob.sobel_filter(lowercase__ )
assert grad.any() and theta.any()
def _snake_case ( ):
_lowerCamelCase : Dict = sp.make_sepia(lowercase__ , 20 )
assert sepia.all()
def _snake_case ( lowercase__ = "digital_image_processing/image_data/lena_small.jpg" ):
_lowerCamelCase : Any = bs.Burkes(imread(lowercase__ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def _snake_case ( lowercase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
_lowerCamelCase : Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def _snake_case ( ):
_lowerCamelCase : str = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
_lowerCamelCase : List[str] = imread(lowercase__ , 0 )
# Test for get_neighbors_pixel function() return not None
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : Union[str, Any] = image[x_coordinate][y_coordinate]
_lowerCamelCase : Optional[int] = lbp.get_neighbors_pixel(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_lowerCamelCase : Dict = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_lowerCamelCase : Optional[Any] = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ )
            assert lbp_image.any()
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
    print(indian_phone_validator("""+918827897895"""))
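# --- Added examples: formats the regex above accepts and rejects ----------------
if __name__ == "__main__":
    for number in ("+91 9876543210", "9876543210", "123456"):
        print(number, "->", indian_phone_validator(number))
    # The first two are valid 10-digit numbers starting with 7/8/9; the last is not.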
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return 1 / (1 + np.exp(-vector))


# SiLU / swish; the original (obfuscated) function name is assumed to be
# `sigmoid_linear_unit`, following the standard implementation this mirrors.
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
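# --- Added worked example --------------------------------------------------------
# SiLU at a point: silu(0) = 0 * sigmoid(0) = 0 * 0.5 = 0.
if __name__ == "__main__":
    v = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(v))              # ~[0.2689 0.5    0.7311]
    print(sigmoid_linear_unit(v))  # ~[-0.2689 0.     0.7311]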
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __lowercase ( lowerCamelCase : Optional[Any] ):
UpperCamelCase_ : List[Any] = 'huggingface/label-files'
UpperCamelCase_ : int = 'imagenet-1k-id2label.json'
UpperCamelCase_ : int = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) , 'r' ) )
UpperCamelCase_ : str = {int(lowerCamelCase ): v for k, v in idalabel.items()}
UpperCamelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
UpperCamelCase_ : str = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCamelCase_ : int = BitConfig(
conv_layer=lowerCamelCase , num_labels=1000 , idalabel=lowerCamelCase , labelaid=lowerCamelCase , )
return config
def __lowercase ( lowerCamelCase : int ):
if "stem.conv" in name:
UpperCamelCase_ : str = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
UpperCamelCase_ : str = name.replace('blocks' , 'layers' )
if "head.fc" in name:
UpperCamelCase_ : Dict = name.replace('head.fc' , 'classifier.1' )
if name.startswith('norm' ):
UpperCamelCase_ : List[str] = 'bit.' + name
if "bit" not in name and "classifier" not in name:
UpperCamelCase_ : int = 'bit.encoder.' + name
return name
def __lowercase ( ):
UpperCamelCase_ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase_ : List[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def __lowercase ( lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : List[str]=False ):
UpperCamelCase_ : Optional[Any] = get_config(lowerCamelCase )
# load original model from timm
UpperCamelCase_ : Dict = create_model(lowerCamelCase , pretrained=lowerCamelCase )
timm_model.eval()
# load state_dict of original model
UpperCamelCase_ : str = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCamelCase_ : Tuple = state_dict.pop(lowerCamelCase )
UpperCamelCase_ : str = val.squeeze() if 'head' in key else val
# load HuggingFace model
UpperCamelCase_ : Dict = BitForImageClassification(lowerCamelCase )
model.eval()
model.load_state_dict(lowerCamelCase )
# create image processor
UpperCamelCase_ : int = create_transform(**resolve_data_config({} , model=lowerCamelCase ) )
UpperCamelCase_ : List[Any] = transform.transforms
UpperCamelCase_ : str = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
UpperCamelCase_ : Tuple = BitImageProcessor(
do_resize=lowerCamelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCamelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCamelCase_ : Dict = prepare_img()
UpperCamelCase_ : Any = transform(lowerCamelCase ).unsqueeze(0 )
UpperCamelCase_ : Union[str, Any] = processor(lowerCamelCase , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(lowerCamelCase , lowerCamelCase )
# verify logits
with torch.no_grad():
UpperCamelCase_ : Dict = model(lowerCamelCase )
UpperCamelCase_ : Tuple = outputs.logits
print('Logits:' , logits[0, :3] )
print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCamelCase_ : List[Any] = timm_model(lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase , outputs.logits , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print(F"Pushing model {model_name} and processor to the hub" )
model.push_to_hub(F"ybelkada/{model_name}" )
processor.push_to_hub(F"ybelkada/{model_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
a_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
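# --- Example invocation (added; the script filename is hypothetical) -------------
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50x1 \
#       --push_to_hub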
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : str = 1_6
_lowercase : List[Any] = 3_2
def snake_case_ ( __SCREAMING_SNAKE_CASE : Accelerator , __SCREAMING_SNAKE_CASE : int = 16 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase_ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__SCREAMING_SNAKE_CASE : Dict ):
# max_length=None => use the model max length (it's actually the default)
lowercase_ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase_ : str = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase_ : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__SCREAMING_SNAKE_CASE : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase_ : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase_ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
lowercase_ : Union[str, Any] = 8
else:
lowercase_ : int = None
return tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase_ : str = DataLoader(
tokenized_datasets['''train'''] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Optional[int] = mocked_dataloaders # noqa: F811
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __SCREAMING_SNAKE_CASE ) == "1":
lowercase_ : List[Any] = 2
# New Code #
lowercase_ : str = int(args.gradient_accumulation_steps )
lowercase_ : Dict = int(args.local_sgd_steps )
# Initialize accelerator
lowercase_ : Dict = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__SCREAMING_SNAKE_CASE )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase_ : Optional[int] = config['''lr''']
lowercase_ : Tuple = int(config['''num_epochs'''] )
lowercase_ : Optional[Any] = int(config['''seed'''] )
lowercase_ : Optional[Any] = int(config['''batch_size'''] )
lowercase_ : str = evaluate.load('''glue''' , '''mrpc''' )
set_seed(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : str = get_dataloaders(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase_ : List[str] = model.to(accelerator.device )
# Instantiate optimizer
lowercase_ : Optional[int] = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE )
# Instantiate scheduler
lowercase_ : List[str] = get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(__SCREAMING_SNAKE_CASE ):
model.train()
with LocalSGD(
accelerator=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , local_sgd_steps=__SCREAMING_SNAKE_CASE , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__SCREAMING_SNAKE_CASE ):
lowercase_ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = output.loss
accelerator.backward(__SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase_ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : str = outputs.logits.argmax(dim=-1 )
lowercase_ , lowercase_ : str = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __SCREAMING_SNAKE_CASE )
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Any = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__SCREAMING_SNAKE_CASE , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=__SCREAMING_SNAKE_CASE , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase_ : List[Any] = parser.parse_args()
lowercase_ : Tuple = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
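# --- Example invocation (added; the standard `accelerate` workflow is assumed,
# it is not shown in this file) ----------------------------------------------------
#   accelerate config   # answer the hardware prompts once per machine
#   accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8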
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = num_of_nodes
lowercase_ : list[list[int]] = []
lowercase_ : dict[int, int] = {}
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowercase_ : Optional[int] = self.find_component(__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
lowercase_ : Any = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__SCREAMING_SNAKE_CASE )
elif component_size[u_node] >= component_size[v_node]:
lowercase_ : int = self.find_component(__SCREAMING_SNAKE_CASE )
component_size[u_node] += component_size[v_node]
self.set_component(__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = []
lowercase_ : Optional[Any] = 0
lowercase_ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowercase_ : Union[str, Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowercase_ , lowercase_ , lowercase_ : List[Any] = edge
lowercase_ : Dict = self.m_component[u]
lowercase_ : Any = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowercase_ : Union[str, Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ , lowercase_ , lowercase_ : str = edge
lowercase_ : Tuple = self.m_component[u]
lowercase_ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
lowercase_ : str = [-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def snake_case_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
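# --- Usage sketch (added; heavily hedged) -------------------------------------
# The class above mirrors the Boruvka MST `Graph` from TheAlgorithms, whose
# methods are add_edge / find_component / set_component / union / boruvka.
# Those names are assumptions here (the obfuscation collapsed every method to
# `_snake_case`, so they collide); the intended API looks like:
#
#   g = Graph(3)
#   g.add_edge(0, 1, 5)
#   g.add_edge(1, 2, 10)
#   g.add_edge(2, 0, 1)
#   g.boruvka()  # prints the chosen edges and a total MST weight of 6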
'''simple docstring'''
# Function names below follow TheAlgorithms' base16 module; the originals were
# obfuscated to a single colliding placeholder.
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
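# --- Added round-trip example ------------------------------------------------------
if __name__ == "__main__":
    encoded = base16_encode(b"Hello World!")
    print(encoded)                 # 48656C6C6F20576F726C6421
    print(base16_decode(encoded))  # b'Hello World!'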
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """simple docstring"""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = {'''vocab_file''': '''sentencepiece.bpe.model'''}
a = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
a = {
'''camembert-base''': 512,
}
a = '''▁'''
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = VOCAB_FILES_NAMES
UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : List[Any]="</s>" , _UpperCAmelCase : Tuple="</s>" , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : Optional[Any]="<unk>" , _UpperCAmelCase : List[str]="<pad>" , _UpperCAmelCase : Dict="<mask>" , _UpperCAmelCase : str=["<s>NOTUSED", "</s>NOTUSED"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
_A = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
_A = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
_A = len(self.fairseq_tokens_to_ids )
_A = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase_ ( self : str ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def lowerCAmelCase_ ( self : Any ):
_A = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_UpperCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : List[str] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[str] ):
_A = []
_A = ''
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
_A = True
_A = []
else:
current_sub_tokens.append(_UpperCAmelCase )
_A = False
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def __getstate__( self : str ):
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : str , _UpperCAmelCase : Optional[int] ):
_A = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
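# --- Usage sketch (added) ------------------------------------------------------------
# Upstream this tokenizer is `CamembertTokenizer`; the import below is an
# assumption based on that library, not on the obfuscated class name above.
if __name__ == "__main__":
    from transformers import CamembertTokenizer

    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    print(tokenizer.tokenize("J'aime le camembert !"))  # SentencePiece subwords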
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a = logging.get_logger(__name__) # pylint: disable=invalid-name
a = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : DDPMScheduler , _UpperCAmelCase : VQModel , ):
super().__init__()
self.register_modules(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , movq=_UpperCAmelCase , )
_A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : int ):
if latents is None:
_A = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A = latents.to(_UpperCAmelCase )
_A = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Tuple=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_A = torch.device(F'''cuda:{gpu_id}''' )
_A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any]=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_A = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A = cpu_offload_with_hook(_UpperCAmelCase , _UpperCAmelCase , prev_module_hook=_UpperCAmelCase )
# We'll offload the last model manually.
_A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self : int ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 100 , guidance_scale : float = 4.0 , num_images_per_prompt : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
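    # A minimal usage sketch (the pipeline variable names and the prior pipeline are
    # placeholders, not defined in this file): the decoder consumes image embeddings
    # produced by a Kandinsky prior pipeline.
    #
    #   image_embeds, negative_image_embeds = prior_pipe("a red cat", guidance_scale=1.0).to_tuple()
    #   image = decoder_pipe(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds,
    #                        height=768, width=768, num_inference_steps=50).images[0]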
| 271 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` keeps the QKV weight separated as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
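# Note on the split above: a fused qkv_proj weight of shape (3*h, h) yields three
# (h, h) blocks; per the metaseq link, the fused layout is K,V,Q, hence the unpack order.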
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ):
    sd = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(sd )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
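# Example invocation (script name and paths are placeholders, not from this file):
#   python convert_opt_checkpoint.py --fairseq_path ./restored.pt --pytorch_dump_folder_path ./opt-hf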
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')


class SegmentTree(Generic[T] ):
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ):
        any_type : Any | T = None
        self.N : int = len(arr )
        self.st : list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ):
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def update( self , p : int , v : T ):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def query( self , l : int , r : int ):  # noqa: E741
        l, r = l + self.N, r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
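# How the iterative query works: leaves sit at indices N..2N-1, so l and r are first
# shifted by N; whenever l is a right child (odd) or r is a left child (even), that
# node's value is folded into res and the bound moves inward before both climb a
# level, so each query touches O(log n) nodes.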
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        for i in range(len(test_array ) ):
            for j in range(i, len(test_array ) ):
                min_range = reduce(min, test_array[i : j + 1] )
                max_range = reduce(max, test_array[i : j + 1] )
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i, j )
                assert max_range == max_segment_tree.query(i, j )
                assert sum_range == sum_segment_tree.query(i, j )
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 138 | 0 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_SCREAMING_SNAKE_CASE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
_SCREAMING_SNAKE_CASE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase ):
    def test_get_test_to_tester_mapping( self ):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {"""BertModelTest""": """BertModelTester"""}
        EXPECTED_BLIP_MAPPING = {
            """BlipModelTest""": """BlipModelTester""",
            """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
            """BlipTextModelTest""": """BlipTextModelTester""",
            """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
            """BlipVQAModelTest""": """BlipVQAModelTester""",
            """BlipVisionModelTest""": """BlipVisionModelTester""",
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , EXPECTED_BLIP_MAPPING )

    def test_get_model_to_test_mapping( self ):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
            """BertForMaskedLM""": ["""BertModelTest"""],
            """BertForMultipleChoice""": ["""BertModelTest"""],
            """BertForNextSentencePrediction""": ["""BertModelTest"""],
            """BertForPreTraining""": ["""BertModelTest"""],
            """BertForQuestionAnswering""": ["""BertModelTest"""],
            """BertForSequenceClassification""": ["""BertModelTest"""],
            """BertForTokenClassification""": ["""BertModelTest"""],
            """BertLMHeadModel""": ["""BertModelTest"""],
            """BertModel""": ["""BertModelTest"""],
        }
        EXPECTED_BLIP_MAPPING = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
            """BlipModel""": ["""BlipModelTest"""],
            """BlipTextModel""": ["""BlipTextModelTest"""],
            """BlipVisionModel""": ["""BlipVisionModelTest"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , EXPECTED_BLIP_MAPPING )

    def test_get_model_to_tester_mapping( self ):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
        EXPECTED_BERT_MAPPING = {
            """BertForMaskedLM""": ["""BertModelTester"""],
            """BertForMultipleChoice""": ["""BertModelTester"""],
            """BertForNextSentencePrediction""": ["""BertModelTester"""],
            """BertForPreTraining""": ["""BertModelTester"""],
            """BertForQuestionAnswering""": ["""BertModelTester"""],
            """BertForSequenceClassification""": ["""BertModelTester"""],
            """BertForTokenClassification""": ["""BertModelTester"""],
            """BertLMHeadModel""": ["""BertModelTester"""],
            """BertModel""": ["""BertModelTester"""],
        }
        EXPECTED_BLIP_MAPPING = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
            """BlipModel""": ["""BlipModelTester"""],
            """BlipTextModel""": ["""BlipTextModelTester"""],
            """BlipVisionModel""": ["""BlipVisionModelTester"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , EXPECTED_BERT_MAPPING )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , EXPECTED_BLIP_MAPPING )
| 3 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation( base_url : str , params : dict ) -> str:
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , """html.parser""" )
    div = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
    anchors = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp( self ):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

    def get_input_output_texts( self , tokenizer ):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text

    def get_clean_sequence( self , tokenizer ):
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids

    def test_pretokenized_inputs( self ):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input( self ):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input( self ):
        pass  # TODO add if relevant

    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
    def test_pickle_mecab_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
        self.assertIsNotNone(tokenizer )
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        filename = os.path.join(self.tmpdirname , 'tokenizer.bin' )
        with open(filename , 'wb' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , 'rb' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )

    def test_mecab_tokenizer( self ):
        tokenizer = MecabTokenizer(mecab_dic='ipadic' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )

    def test_mecab_tokenizer_unidic_lite( self ):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic_lite' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )

    def test_mecab_tokenizer_unidic( self ):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )

    def test_mecab_tokenizer_lower( self ):
        tokenizer = MecabTokenizer(do_lower_case=True , mecab_dic='ipadic' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )

    def test_mecab_tokenizer_with_option( self ):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True , normalize_text=False , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )

    def test_mecab_tokenizer_no_normalize( self ):
        tokenizer = MecabTokenizer(normalize_text=False , mecab_dic='ipadic' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(_UpperCamelCase )
UpperCAmelCase_ : Any = 'こんにちは、世界。\nこんばんは、世界。'
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_UpperCamelCase , 'wb' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , 'rb' ) as handle:
UpperCAmelCase_ : Tuple = pickle.load(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_sudachi
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Tuple = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Tuple = SudachiTokenizer(do_lower_case=_UpperCamelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : List[str] = SudachiTokenizer(normalize_text=_UpperCamelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[Any] = SudachiTokenizer(trim_whitespace=_UpperCamelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
UpperCAmelCase_ : int = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_UpperCamelCase , 'wb' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , 'rb' ) as handle:
UpperCAmelCase_ : str = pickle.load(_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_jumanpp
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : int = JumanppTokenizer(do_lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = JumanppTokenizer(normalize_text=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = JumanppTokenizer(trim_whitespace=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
        self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
        self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )

    def test_sentencepiece_tokenizer( self ):
        tokenizer = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
        self.assertListEqual(tokens , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
        tokens = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
        self.assertListEqual(tokens , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )

    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False )
        text_a = tokenizer.encode('どういたしまして。' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

    def get_tokenizer( self , **kwargs ):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
        return input_text, output_text

    def test_pretokenized_inputs( self ):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input( self ):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input( self ):
        pass  # TODO add if relevant

    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
        tokens = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
        self.assertListEqual(
            tokens , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )

    def test_character_tokenizer( self ):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
        self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )

    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False )
        text_a = tokenizer.encode('どういたしまして。' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase ):
    def test_tokenizer_bert_japanese( self ):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )


class BertTokenizerMismatchTest(unittest.TestCase ):
    def test_tokenizer_mismatch_warning( self ):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs('transformers' , level='WARNING' ) as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID )
        self.assertTrue(
            cm.records[0].message.startswith(
                'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                ' is called from.' ) )
        EXAMPLE_BERT_ID = 'bert-base-cased'
        with self.assertLogs('transformers' , level='WARNING' ) as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID )
        self.assertTrue(
            cm.records[0].message.startswith(
                'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                ' is called from.' ) )
| 29 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
A =logging.get_logger(__name__)
A =TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets (datasets : List[DatasetType] , probabilities : Optional[List[float]] = None , seed : Optional[int] = None , info : Optional[DatasetInfo] = None , split : Optional[NamedSplit] = None , stopping_strategy : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
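# A minimal usage sketch (toy data, not part of this module):
#
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42)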
def concatenate_datasets (dsets : List[DatasetType] , info : Optional[DatasetInfo] = None , split : Optional[NamedSplit] = None , axis : int = 0 , ) -> DatasetType:
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
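# Similarly (toy data): concatenate_datasets([d1, d2]) stacks rows into one 6-row
# dataset, while axis=1 joins the datasets column-wise instead.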
| 34 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset ):
    def __init__( self , length: int = 101 ):
        self.length = length

    def __len__( self ):
        return self.length

    def __getitem__( self , i ):
        return i


class DummyDataCollator:
    def __call__( self , features ):
        return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}
class DummyModel(nn.Module ):
    def __init__( self ):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120 , 80 )

    def forward( self , input_ids , labels=None ):
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids
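    # Note: when labels are provided the Trainer expects a (loss, logits) tuple; echoing
    # the input ids back as "logits" lets the distributed test verify sample order below.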
class TestTrainerDistributedNeuronCore(TestCasePlus ):
    @require_torch_neuroncore
    def test_trainer( self ):
        distributed_args = f'--nproc_per_node=2\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'--output_dir {output_dir}'.split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus ):
    @require_torch_multi_gpu
    def test_trainer( self ):
        distributed_args = f'--nproc_per_node={torch.cuda.device_count()}\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'--output_dir {output_dir}'.split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics( p: EvalPrediction ) -> Dict:
            sequential = list(range(len(dataset ) ) )
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    '''Predictions and/or labels do not match expected results:\n  - predictions: '''
                    f'{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}' )
            return {"success": success}
        trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = None | 356
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory( args: Namespace ):
    """simple docstring"""
    return ConvertCommand(args.tfds_path , args.datasets_directory )


class ConvertCommand( BaseDatasetsCLICommand ):
    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        """simple docstring"""
        train_parser = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=str , required=True , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=str , required=True , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )

    def __init__( self , tfds_path: str , datasets_directory: str , *args ):
        """simple docstring"""
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        """simple docstring"""
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'Looking at file {f_name}' )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file , encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'Error converting {out_line.strip()}' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('''.py''' , '''''' )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(f'Adding directory {output_dir}' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(f'Converted in {output_file}' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f'Moving {dest_folder} to {utils_file}' )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 93 | 0 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    output_path.parent.mkdir(parents=True , exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
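# Note: `use_external_data_format` matters for the UNet export below; ONNX protobufs
# are capped at 2GB, so large weights are written as external tensors and then
# re-collated into a single weights.pb file.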
@torch.no_grad()
def convert_models( model_path , output_path , opset , fp16 = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path , torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=True , return_tensors="pt" , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=device , dtype=torch.int32)) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        } , opset=opset , )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , unet_in_channels , unet_sample_size , unet_sample_size).to(device=device , dtype=dtype),
            torch.randn(2).to(device=device , dtype=dtype),
            torch.randn(2 , num_tokens , text_hidden_size).to(device=device , dtype=dtype),
            False,
        ) , output_path=unet_path , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        } , opset=opset , use_external_data_format=True , )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet , unet_model_path , save_as_external_data=True , all_tensors_to_one_file=True , location="weights.pb" , convert_attribute=False , )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample , return_dict: vae_encoder.encode(sample , return_dict)[0].sample()
    onnx_export(
        vae_encoder , model_args=(
            torch.randn(1 , vae_in_channels , vae_sample_size , vae_sample_size).to(device=device , dtype=dtype),
            False,
        ) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        } , opset=opset , )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , unet_sample_size , unet_sample_size).to(device=device , dtype=dtype),
            False,
        ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        } , opset=opset , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , clip_num_channels , clip_image_size , clip_image_size , ).to(device=device , dtype=dtype),
                torch.randn(1 , vae_sample_size , vae_sample_size , vae_in_channels).to(device=device , dtype=dtype),
            ) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            } , opset=opset , )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder") , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder") , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder") , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet") , scheduler=pipeline.scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to" , output_path)
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path , provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16) | 76 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__( self ):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor( self ):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start( self ):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop( self ):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure( start_measures ):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[F"""{i}-peak"""] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
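# A minimal usage sketch for the helpers above (`run_inference` is a hypothetical
# callable, not part of this module):
#
#   start_measures = start_measure()
#   run_inference()
#   measures = end_measure(start_measures)
#   log_measures(measures, "inference")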
def log_measures( measures , description ):
    print(F"""{description}:""" )
    print(F"""- Time: {measures['time']:.2f}s""" )
    for i in range(torch.cuda.device_count() ):
        print(F"""- GPU {i} allocated: {measures[str(i )]:.2f}MiB""" )
        peak = measures[F"""{i}-peak"""]
        print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
    print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
    print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 269 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wav2vec2( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def lowerCamelCase__ ( a__ : List[str] , a__ : Optional[Any] , a__ : Dict , a__ : List[str] ) -> int:
UpperCamelCase_ = full_name.split("""adaptor.""" )[-1]
UpperCamelCase_ = name.split(""".""" )
if items[1].isdigit():
UpperCamelCase_ = int(items[1] )
else:
UpperCamelCase_ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
UpperCamelCase_ = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
UpperCamelCase_ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
UpperCamelCase_ = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
UpperCamelCase_ = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(a__ , a__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
UpperCamelCase_ = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
UpperCamelCase_ = value
            logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(a__ )
def lowerCamelCase__ ( a__ : Union[str, Any] ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = emb.weight.shape
UpperCamelCase_ = nn.Linear(a__ , a__ , bias=a__ )
UpperCamelCase_ = emb.weight.data
return lin_layer
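# Editor's note (added; not part of the original script, and it assumes
# `torch.nn as nn` is in scope): the helper above is the usual "tie the LM
# head to the embeddings" trick. It creates a bias-free nn.Linear and
# overwrites its .weight.data with emb.weight.data (shape vocab_size x
# emb_size), so the returned layer maps emb_size-dim hidden states to
# vocab_size logits while sharing storage with the embedding:
#
#     emb = nn.Embedding(10, 4)            # vocab_size=10, emb_size=4
#     head = nn.Linear(10, 4, bias=False)
#     head.weight.data = emb.weight.data   # weight is now (10, 4)
#     # head(hidden) with hidden of shape (..., 4) yields (..., 10) logits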
@torch.no_grad()
def lowerCamelCase__ ( a__ : Optional[int] , a__ : Any , a__ : Optional[int] , a__ : Any , a__ : int , a__ : Union[str, Any] , a__ : Tuple , a__ : Dict , a__ : Optional[Any] , a__ : List[str] , a__ : Union[str, Any] , ) -> List[str]:
UpperCamelCase_ = WavaVecaConfig.from_pretrained(
a__ , add_adapter=a__ , adapter_stride=a__ , adapter_kernel_size=a__ , use_auth_token=a__ , output_hidden_size=a__ , )
UpperCamelCase_ = MBartConfig.from_pretrained(a__ )
# load model
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
UpperCamelCase_ = model[0].eval()
# load feature extractor
UpperCamelCase_ = WavaVecaFeatureExtractor.from_pretrained(a__ , use_auth_token=a__ )
# set weights for wav2vec2 encoder
UpperCamelCase_ = WavaVecaModel(a__ )
recursively_load_weights_wavaveca(model.encoder , a__ )
# load decoder weights
UpperCamelCase_ = MBartForCausalLM(a__ )
UpperCamelCase_ , UpperCamelCase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a__ )
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
UpperCamelCase_ = SpeechEncoderDecoderModel(encoder=a__ , decoder=a__ )
UpperCamelCase_ = False
UpperCamelCase_ = MBartaaTokenizer(a__ )
tokenizer.save_pretrained(a__ )
UpperCamelCase_ = hf_wavavec.config.to_dict()
UpperCamelCase_ = tokenizer.pad_token_id
UpperCamelCase_ = tokenizer.bos_token_id
UpperCamelCase_ = tokenizer.eos_token_id
UpperCamelCase_ = """mbart50"""
UpperCamelCase_ = """wav2vec2"""
UpperCamelCase_ = tokenizer.eos_token_id
UpperCamelCase_ = 25_0004
UpperCamelCase_ = tokenizer.eos_token_id
UpperCamelCase_ = SpeechEncoderDecoderConfig.from_dict(a__ )
hf_wavavec.save_pretrained(a__ )
feature_extractor.save_pretrained(a__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
    parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whether to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250_004, type=int, help='''`decoder_start_token_id` of model config''')
_A = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
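# Editor's usage sketch (added): all paths below are placeholders, and the
# script filename is illustrative rather than taken from the original file.
#
#     python convert_wav2vec2_mbart_checkpoint.py \
#         --checkpoint_path /path/to/fairseq_checkpoint.pt \
#         --dict_path /path/to/dict \
#         --config_yaml_path /path/to/config.yaml \
#         --pytorch_dump_folder_path ./converted_model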
| 261 |
import comet # From: unbabel-comet
import torch
import datasets
_A = datasets.logging.get_logger(__name__)
_A = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_A = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year\'s competition.
See the model documentation at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_A = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`gpus` (int): Number of GPUs to run on; defaults to 1 if CUDA is available, otherwise 0
`progress_bar` (bool): If set to True, shows a progress bar during prediction
`model`: COMET model to be used (selected via the metric config name). Defaults to `wmt20-comet-da`.
Returns:
`mean_score`: Mean of the segment-level scores.
`scores`: List of segment-level scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
if self.config_name == "default":
UpperCamelCase_ = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
UpperCamelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=False ):
"""simple docstring"""
if gpus is None:
UpperCamelCase_ = 1 if torch.cuda.is_available() else 0
UpperCamelCase_ = {"""src""": sources, """mt""": predictions, """ref""": references}
UpperCamelCase_ = [dict(zip(__UpperCamelCase , __UpperCamelCase ) ) for t in zip(*data.values() )]
UpperCamelCase_ , UpperCamelCase_ = self.scorer.predict(__UpperCamelCase , gpus=__UpperCamelCase , progress_bar=__UpperCamelCase )
return {"mean_score": mean_score, "scores": scores}
| 261 | 1 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort `numbers` in place with pairwise exchanges and return it."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
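# Editor's sanity check (added): exchange sort compares every pair (i, j)
# with j > i and swaps out-of-order elements in place, always performing
# O(n^2) comparisons even on already-sorted input.
assert exchange_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]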
if __name__ == "__main__":
__lowerCamelCase : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : Dict = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
| 219 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict:
with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f:
UpperCAmelCase_ : Union[str, Any] = json.load(A )
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = []
for key, info in class_info.items():
UpperCAmelCase_ : Tuple = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(A ) )
UpperCAmelCase_ : Any = thing_ids
UpperCAmelCase_ : Union[str, Any] = class_names
return metadata
class snake_case__ ( unittest.TestCase):
def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Tuple = min_resolution
UpperCAmelCase_ : Optional[int] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
UpperCAmelCase_ : int = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
UpperCAmelCase_ : str = class_info_file
UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
UpperCAmelCase_ : Tuple = num_text
UpperCAmelCase_ : Union[str, Any] = repo_path
# for the post_process_functions
UpperCAmelCase_ : Any = 2
UpperCAmelCase_ : Dict = 10
UpperCAmelCase_ : int = 10
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
UpperCAmelCase_ : str = ignore_index
def A ( self : Dict ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]:
if not batched:
UpperCAmelCase_ : Any = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase_ : int = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
UpperCAmelCase_ : str = self.size['''shortest_edge''']
else:
UpperCAmelCase_ : Dict = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ : int = max(_A , key=lambda _A : item[0] )[0]
UpperCAmelCase_ : List[str] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
def A ( self : Tuple ) -> str:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ = image_processing_class
def A ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )
@property
def A ( self : Any ) -> int:
return self.image_processing_tester.prepare_image_processor_dict()
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''ignore_index''' ) )
self.assertTrue(hasattr(_A , '''class_info_file''' ) )
self.assertTrue(hasattr(_A , '''num_text''' ) )
self.assertTrue(hasattr(_A , '''repo_path''' ) )
self.assertTrue(hasattr(_A , '''metadata''' ) )
self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )
def A ( self : Dict ) -> Dict:
pass
def A ( self : Tuple ) -> Dict:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : int = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Tuple:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Dict ) -> Union[str, Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase_ : Optional[int] = image_processor(
_A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : int , _A : Any=False , _A : List[Any]=False , _A : Any="np" ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
if with_segmentation_maps:
UpperCAmelCase_ : Any = num_labels
if is_instance_map:
UpperCAmelCase_ : Any = list(range(_A ) ) * 2
UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
UpperCAmelCase_ : Dict = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]
UpperCAmelCase_ : Tuple = image_processor(
_A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
return inputs
def A ( self : int ) -> str:
pass
def A ( self : Tuple ) -> Union[str, Any]:
def common(_A : Optional[int]=False , _A : str=None ):
UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )
UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
UpperCAmelCase_ : int = inputs['''pixel_values''']
UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_A , _A , _A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_A )
common(is_instance_map=_A , segmentation_type='''pil''' )
common(is_instance_map=_A , segmentation_type='''pil''' )
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_ : int = np.zeros((20, 50) )
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_A )
self.assertEqual(len(_A ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
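    # Editor's note (added): the RLE checked above is a flat list of
    # (start, length) pairs describing the runs of ones in the flattened
    # mask, with 1-indexed starts. The assertions therefore say: the first
    # run of ones begins at pixel 21 and is 45 pixels long, and exactly two
    # runs (hence four numbers) are present.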
def A ( self : Any ) -> List[Any]:
UpperCAmelCase_ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : Union[str, Any] = image_processor.post_process_semantic_segmentation(_A )
self.assertEqual(len(_A ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase_ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCAmelCase_ : Any = image_processor.post_process_semantic_segmentation(_A , target_sizes=_A )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )
self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 304 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( _a , _a , unittest.TestCase ):
UpperCamelCase : Dict = StableDiffusionSAGPipeline
UpperCamelCase : List[Any] = TEXT_TO_IMAGE_PARAMS
UpperCamelCase : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: int =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE_: Tuple =DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[str] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE_: Dict =CLIPTextModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE_: Optional[Any] ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any]=0 ) -> Dict:
'''simple docstring'''
if str(lowerCAmelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE_: str =torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] ={
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =sag_pipe.to(lowerCAmelCase )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple ='.'
SCREAMING_SNAKE_CASE_: int =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =sag_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE_: Tuple =output.images
SCREAMING_SNAKE_CASE_: Union[str, Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: Tuple =np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE_: Dict =sag_pipe.to(lowerCAmelCase )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] ='.'
SCREAMING_SNAKE_CASE_: str =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Any =sag_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE_: List[str] =output.images
SCREAMING_SNAKE_CASE_: List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: int =np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE_: List[str] =sag_pipe.to(lowerCAmelCase )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] ='.'
SCREAMING_SNAKE_CASE_: Tuple =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[Any] =sag_pipe(
[prompt] , width=768 , height=512 , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
SCREAMING_SNAKE_CASE_: Tuple =output.images
assert image.shape == (1, 512, 768, 3)
| 173 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if side lengths `nums` can form a non-degenerate polygon."""
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
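# Editor's worked examples (added): the longest side must be strictly shorter
# than the sum of all the others.
assert check_polygon([3, 4, 5]) is True   # 5 < 3 + 4
assert check_polygon([1, 2, 3]) is False  # 3 == 1 + 2, i.e. degenerate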
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_snake_case = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
_snake_case = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCAmelCase_ ( ):
_A : Optional[int] = (
list(range(ord("""!""" ),ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ),ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ),ord("""ÿ""" ) + 1 ) )
)
_A : str = bs[:]
_A : str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
_A : List[str] = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_,snake_case_ ) )
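# Editor's note (added): bytes_to_unicode builds a reversible map from all
# 256 byte values to printable unicode characters. Bytes that are already
# printable (roughly "!".."~", "¡".."¬", "®".."ÿ") map to themselves; every
# other byte is shifted to chr(256 + n). This lets the byte-level BPE below
# treat arbitrary UTF-8 input as plain text, with no whitespace or control
# characters to special-case.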
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = set()
_A : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A : Tuple = char
return pairs
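# Editor's worked example (added): get_pairs returns the set of adjacent
# symbol bigrams of a word, e.g.
#     get_pairs(("l", "o", "w"))  ->  {("l", "o"), ("o", "w")}
# These bigrams are what the BPE loop below ranks and merges.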
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "attention_mask"]
def __init__( self , _a , _a , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , **_a , ) -> Dict:
_A : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
_A : List[str] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
_A : Optional[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
_A : Optional[int] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
_A : Optional[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
_A : int = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_A : Optional[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , )
with open(_a , encoding="""utf-8""" ) as vocab_handle:
_A : Any = json.load(_a )
_A : Dict = {v: k for k, v in self.encoder.items()}
_A : str = errors # how to handle errors in decoding
_A : Union[str, Any] = bytes_to_unicode()
_A : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(_a , encoding="""utf-8""" ) as merges_handle:
_A : List[Any] = merges_handle.read().split("""\n""" )[1:-1]
_A : int = [tuple(merge.split() ) for merge in bpe_merges]
_A : List[str] = dict(zip(_a , range(len(_a ) ) ) )
_A : str = {}
_A : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_A : List[str] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__ ( self ) -> Dict:
return len(self.encoder )
def a__ ( self ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self , _a ) -> Optional[int]:
if token in self.cache:
return self.cache[token]
_A : List[str] = tuple(_a )
_A : Dict = get_pairs(_a )
if not pairs:
return token
while True:
_A : str = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[Any] = bigram
_A : Optional[int] = []
_A : Any = 0
while i < len(_a ):
try:
_A : Union[str, Any] = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A : str = tuple(_a )
_A : Tuple = new_word
if len(_a ) == 1:
break
else:
_A : Dict = get_pairs(_a )
_A : int = """ """.join(_a )
_A : str = word
return word
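    # Editor's note (added): the loop above is standard byte-level BPE. It
    # repeatedly picks the adjacent pair with the lowest merge rank in
    # self.bpe_ranks, fuses every occurrence of that pair in the word, and
    # stops when no ranked pair remains; the surviving symbols are joined
    # with spaces and cached per token.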
def a__ ( self , _a ) -> List[str]:
_A : int = []
for token in re.findall(self.pat , _a ):
_A : Optional[int] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def a__ ( self , _a ) -> Optional[Any]:
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def a__ ( self , _a ) -> Optional[Any]:
return self.decoder.get(_a )
def a__ ( self , _a ) -> int:
_A : int = """""".join(_a )
_A : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : Union[str, Any] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Dict = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + """\n""" )
_A : List[str] = 0
with open(_a , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
_A : Any = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def a__ ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def a__ ( self , _a , _a = None ) -> List[int]:
_A : int = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self , _a , _a=False , **_a ) -> Optional[Any]:
_A : List[str] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
_A : Dict = """ """ + text
return (text, kwargs)
def a__ ( self , _a , _a = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
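    # Editor's note (added): unlike RoBERTa-style tokenizers, Blenderbot only
    # appends a single EOS here and ignores the second sequence, e.g.
    #     build_inputs_with_special_tokens([5, 6, 7])  ->  [5, 6, 7, eos_token_id]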
def a__ ( self , _a ) -> List[int]:
_A : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_A : List[str] = """ """.join(_a )
_A : Dict = self.encode(_a )
if len(_a ) > self.model_max_length:
_A : str = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 343 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class lowercase ( UpperCamelCase__ ):
_a = "audio-spectrogram-transformer"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=16 , _a=True , _a=10 , _a=10 , _a=1024 , _a=128 , **_a , ) -> List[Any]:
super().__init__(**_a )
_A : Any = hidden_size
_A : Tuple = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Any = intermediate_size
_A : Optional[Any] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Optional[Any] = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : str = patch_size
_A : Tuple = qkv_bias
_A : Dict = frequency_stride
_A : Union[str, Any] = time_stride
_A : Any = max_length
_A : Tuple = num_mel_bins
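        # Editor's worked example (added, assuming the standard AST
        # patch-embedding arithmetic): with patch_size=16,
        # frequency_stride=10, time_stride=10, num_mel_bins=128 and
        # max_length=1024, the patch grid is
        #     (128 - 16) // 10 + 1 = 12 patches along frequency,
        #     (1024 - 16) // 10 + 1 = 101 patches along time,
        # i.e. 12 * 101 = 1212 patches per spectrogram.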
| 343 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "naver-clova-ix/donut-base-finetuned-docvqa"
A_ = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
A_ = "document_qa"
A_ = AutoProcessor
A_ = VisionEncoderDecoderModel
A_ = ["image", "text"]
A_ = ["text"]
def __init__( self , *__a , **__a ):
'''simple docstring'''
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*__a , **__a )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : List[str] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__a : Any = task_prompt.replace('{user_input}' , __a )
__a : Optional[Any] = self.pre_processor.tokenizer(
__a , add_special_tokens=__a , return_tensors='pt' ).input_ids
__a : Optional[int] = self.pre_processor(__a , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__a , ).sequences
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Union[str, Any] = self.pre_processor.batch_decode(__a )[0]
__a : str = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
__a : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
__a : Any = re.sub(r'<.*?>' , '' , __a , count=1 ).strip() # remove first task start token
__a : Tuple = self.pre_processor.tokenajson(__a )
return sequence["answer"]
| 27 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess')
def a_ ( lowerCamelCase ):
if root is None:
return 0
# Validation
def count_nodes(lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCamelCase ) != count_coins(lowerCamelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.left )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.right )
UpperCAmelCase__ = 1 - left_distrib_excess
UpperCAmelCase__ = 1 - right_distrib_excess
UpperCAmelCase__ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowerCamelCase )
+ abs(lowerCamelCase )
)
UpperCAmelCase__ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowerCamelCase , lowerCamelCase )
return get_distrib(lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class snake_case__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = XLNetTokenizer
lowerCamelCase = XLNetTokenizerFast
lowerCamelCase = True
lowerCamelCase = True
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case : str = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
snake_case : Optional[int] = '''<s>'''
snake_case : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
snake_case : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<eod>''' )
self.assertEqual(len(UpperCamelCase__ ) , 1006 )
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
snake_case : Optional[int] = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
snake_case : str = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [285, 46, 10, 170, 382] )
snake_case : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
snake_case : int = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
snake_case : int = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
snake_case : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
snake_case : int = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
snake_case : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
snake_case : List[Any] = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
snake_case : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase__ )
snake_case : Optional[int] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase__ )
snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
snake_case : Dict = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
snake_case : Dict = {'''input_ids''': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
| 83 |
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    '''Insertion sort that locates each insertion point with binary search.'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
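# Editor's sanity check (added): binary search finds each insertion point in
# O(log n) comparisons, but shifting elements keeps the overall sort O(n^2).
assert binary_insertion_sort([5, 2, 4, 1]) == [1, 2, 4, 5]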
if __name__ == "__main__":
lowercase__ = input("Enter numbers separated by a comma:\n").strip()
lowercase__ = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 83 | 1 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class UpperCamelCase__ ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, **kwargs):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        # Pad the bottom and right edges so that height and width become
        # multiples of `size`, mirroring border pixels symmetrically.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images, do_rescale=None, rescale_factor=None, do_pad=None, pad_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
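A brief usage sketch for the processor above (the class name is kept from the snippet; the random input and shapes are purely illustrative, assuming the implementation shown):

import numpy as np

processor = UpperCamelCase__(do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8)
image = np.random.randint(0, 256, size=(3, 21, 30), dtype=np.uint8)  # channels-first, H=21, W=30
batch = processor.preprocess(image)
# 21 -> 24 and 30 -> 32: each spatial dimension is padded up to the next multiple of 8.
print(batch["pixel_values"][0].shape)  # (3, 24, 32)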
| 311 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
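A few illustrative inputs and outputs for the helpers above (assuming the regex-based implementations shown; names such as "squad" are placeholders):

assert camelcase_to_snakecase("SquadV1") == "squad_v1"
assert snakecase_to_camelcase("squad_v1") == "SquadV1"
# One filename per shard, zero-padded to five digits:
print(filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 200]))
# ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']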
| 311 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a batch of torch tensors in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (N, H, W, C) in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
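A minimal sketch exercising `numpy_to_pil` above with random data (shapes chosen arbitrarily for illustration):

import numpy as np

batch = np.random.rand(2, 8, 8, 3)  # two 8x8 RGB images in [0, 1]
pil_images = numpy_to_pil(batch)
print(len(pil_images), pil_images[0].size, pil_images[0].mode)  # 2 (8, 8) RGB

gray = np.random.rand(8, 8, 1)  # a single channel takes the grayscale branch
print(numpy_to_pil(gray)[0].mode)  # L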
| 362 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random nested float list with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


class WavaVecaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WavaVecaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np")
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np")
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np")
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = WavaVecaConfig.from_pretrained(model_id)
            feat_extract = WavaVecaFeatureExtractor.from_pretrained(model_id)
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
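The `_check_zero_mean_unit_variance` assertions above encode what the extractor's per-utterance normalization should produce. A standalone sketch of that normalization (my own illustration, not the library's internal code; the epsilon is an assumed guard value):

import numpy as np

def zero_mean_unit_var(x: np.ndarray) -> np.ndarray:
    # Shift to mean 0 and scale to variance 1; the epsilon guards all-equal inputs.
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

wave = np.random.randn(800) * 3.5 + 2.0
normed = zero_mean_unit_var(wave)
print(round(float(normed.mean()), 6), round(float(normed.var()), 6))  # ~0.0, ~1.0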
| 252 | 0 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the winning weight vector by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        """Pull the winning weight vector towards the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 56 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Rank 0 gathers the full state dict and writes a single file.
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            # Every rank writes its own local shard.
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            # Distributed checkpoint: each rank streams its shard into a directory.
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object")
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
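The rank-dependent file naming above is what lets each process write and later find its own shard. A tiny standalone sketch of how the LOCAL_STATE_DICT names compose (the constant value mirrors the `MODEL_NAME` import, but treat it as illustrative):

MODEL_NAME = "pytorch_model"  # assumed value of the imported constant

def local_weights_name(process_index: int, model_index: int = 0) -> str:
    # One checkpoint file per rank, optionally namespaced by model index.
    return (
        f"{MODEL_NAME}_rank{process_index}.bin"
        if model_index == 0
        else f"{MODEL_NAME}_{model_index}_rank{process_index}.bin"
    )

print([local_weights_name(rank) for rank in range(2)])
# ['pytorch_model_rank0.bin', 'pytorch_model_rank1.bin']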
| 92 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self) -> ByTaTokenizer:
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])), )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no pretrained tokenizer list to check
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist, so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # A different implementation is needed here because this tokenizer has no vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
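The expected ids in these tests follow directly from ByT5's byte-level scheme: each UTF-8 byte is shifted by a small offset that reserves the first ids for special tokens. The offset of 3 can be read off the test values themselves ('U' = byte 85 -> id 88). A minimal sketch:

def byt5_like_ids(text: str, offset: int = 3) -> list:
    # Shift every UTF-8 byte by `offset` and append id 1, which the tests use as EOS.
    return [b + offset for b in text.encode("utf-8")] + [1]

print(byt5_like_ids("Unicode €."))
# [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] — matches the expected ids above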
| 327 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as 'mm-dd-yyyy' or 'mm/dd/yyyy'."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
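A worked pass through the congruence above, for 01-31-2010 (the arithmetic mirrors the code step by step):

# m=13, y=2009 after the month shift (January counts as month 13 of the prior year);
# c=20, k=9, t=int(2.6*13 - 5.39)=28, u=int(20/4)=5, v=int(9/4)=2, x=31+9=40;
# z=28+5+2+40=75, w=75-2*20=35, f=35 % 7=0 -> Sunday.
print(zeller("01-31-2010"))  # Your date 01-31-2010, is a Sunday!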
| 327 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by sampling uniform points in the square circumscribing the unit circle."""
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Check the estimator against the known area under y = x."""
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 90 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 90 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowercase__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase_ = {0: "batch"}
UpperCAmelCase_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(_UpperCAmelCase ):
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
else:
UpperCAmelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowercase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super().outputs
else:
UpperCAmelCase_ = super(_UpperCAmelCase , self ).outputs
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(_UpperCAmelCase ):
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Generate decoder inputs
UpperCAmelCase_ = seq_length if not self.use_past else 1
UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase_ = dict(**_UpperCAmelCase , **_UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs["input_ids"].shape
UpperCAmelCase_ = common_inputs["decoder_input_ids"].shape[1]
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = decoder_seq_length + 3
UpperCAmelCase_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_UpperCAmelCase , _UpperCAmelCase )] , dim=1 )
UpperCAmelCase_ = []
            # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ = min(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = max(_UpperCAmelCase , _UpperCAmelCase ) - min_num_layers
UpperCAmelCase_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_UpperCAmelCase ),
torch.zeros(_UpperCAmelCase ),
torch.zeros(_UpperCAmelCase ),
torch.zeros(_UpperCAmelCase ),
) )
# TODO: test this.
UpperCAmelCase_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_UpperCAmelCase , _UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) )
return common_inputs
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self._generate_dummy_inputs_for_encoder_and_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase_ = seqlen + 2
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = common_inputs["attention_mask"].dtype
UpperCAmelCase_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
UpperCAmelCase_ = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(_UpperCAmelCase )
]
return common_inputs
def lowercase__ ( self : int , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If the axis is dynamic (-1), we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
UpperCAmelCase_ = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase_ = dict(tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase ) )
return common_inputs
def lowercase__ ( self : List[Any] , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
else:
UpperCAmelCase_ = self._generate_dummy_inputs_for_causal_lm(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
return common_inputs
def lowercase__ ( self : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super()._flatten_past_key_values_(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
UpperCAmelCase_ = super(_UpperCAmelCase , self )._flatten_past_key_values_(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@property
def lowercase__ ( self : Dict ) -> float:
'''simple docstring'''
return 1e-4
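# Hedged sketch of how a seq2seq ONNX config like the one above is typically
# driven during export. The `transformers.onnx.export` call below follows the
# upstream API as I understand it; treat the exact signature as an assumption.
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, MarianMTModel
#   from transformers.onnx import export
#
#   tok = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = ...  # an instance of the OnnxSeq2SeqConfigWithPast subclass above
#   export(tok, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))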
| 241 |
"""simple docstring"""
def a__ ( sentence , ngram_size ):
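    """Return all contiguous n-grams of ``sentence`` with length ``ngram_size``.

    >>> a__("abcde", 2)
    ['ab', 'bc', 'cd', 'de']
    """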
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 241 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowerCamelCase_ ( UpperCamelCase__ : dict ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def lowerCamelCase_ ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray ) -> np.ndarray:
"""simple docstring"""
__lowerCamelCase = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(UpperCamelCase__ , UpperCamelCase__ )
# Predict target for test data
__lowerCamelCase = xgb.predict(UpperCamelCase__ )
__lowerCamelCase = predictions.reshape(len(UpperCamelCase__ ) , 1 )
return predictions
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
__lowerCamelCase = fetch_california_housing()
__lowerCamelCase , __lowerCamelCase = data_handling(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = train_test_split(
UpperCamelCase__ , UpperCamelCase__ , test_size=0.25 , random_state=1 )
__lowerCamelCase = xgboost(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Error printing
print(F"""Mean Absolute Error : {mean_absolute_error(UpperCamelCase__ , UpperCamelCase__ )}""" )
print(F"""Mean Square Error : {mean_squared_error(UpperCamelCase__ , UpperCamelCase__ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
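# Hedged follow-up: `mean_squared_error` above reports MSE; RMSE is often easier
# to read since it is in the target's own units. A minimal sketch (the variable
# names are illustrative):
#
#   import numpy as np
#   from sklearn.metrics import mean_squared_error
#   rmse = np.sqrt(mean_squared_error(y_test, predictions))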
| 90 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def A__ ( self: List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def A__ ( self: str ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def A__ ( self: Tuple ) -> Tuple:
def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ):
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = torch.ones([0] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : int = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : str = output.images
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: str ) -> Dict:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ ,lowerCamelCase_ )
assert isinstance(pipe.scheduler ,lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCAmelCase_ : List[Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase_ : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : Tuple = self.dummy_cond_unet
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCAmelCase_ : Optional[Any] = unet.half()
UpperCAmelCase_ : Optional[int] = vae.half()
UpperCAmelCase_ : int = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : Optional[int] = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
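# Hedged note on the safe-latent-diffusion knobs exercised by the nightly tests
# below: `sld_guidance_scale=0` disables safety guidance entirely, while a large
# value (e.g. 2000) combined with `sld_warmup_steps`, `sld_threshold`,
# `sld_momentum_scale` and `sld_mom_beta` forms the "strong" configuration.
# A minimal call sketch (parameter names are taken from the calls below;
# everything else is illustrative):
#
#   out = sd_pipe([prompt], generator=gen, num_inference_steps=50,
#                 output_type="np", sld_guidance_scale=2000, sld_warmup_steps=7,
#                 sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7)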
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ) -> List[Any]:
UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCAmelCase_ : Optional[int] = 4003660346
UpperCAmelCase_ : int = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase_ : Dict = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Any = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase_ )
UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase_ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCAmelCase_ : List[Any] = 2734971755
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : int = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCAmelCase_ : Any = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : Dict = output.images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Tuple = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Union[str, Any] ) -> int:
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCAmelCase_ : Optional[Any] = 1044355234
UpperCAmelCase_ : List[str] = 12
UpperCAmelCase_ : List[Any] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=lowerCamelCase_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_2_5 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
UpperCAmelCase_ : List[str] = output.images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 345 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def A (__lowerCamelCase :List[Any] ):
_lowerCAmelCase = 384
_lowerCAmelCase = 7
if "tiny" in model_name:
_lowerCAmelCase = 96
_lowerCAmelCase = (2, 2, 6, 2)
_lowerCAmelCase = (3, 6, 12, 24)
elif "small" in model_name:
_lowerCAmelCase = 96
_lowerCAmelCase = (2, 2, 18, 2)
_lowerCAmelCase = (3, 6, 12, 24)
elif "base" in model_name:
_lowerCAmelCase = 128
_lowerCAmelCase = (2, 2, 18, 2)
_lowerCAmelCase = (4, 8, 16, 32)
_lowerCAmelCase = 12
_lowerCAmelCase = 512
elif "large" in model_name:
_lowerCAmelCase = 192
_lowerCAmelCase = (2, 2, 18, 2)
_lowerCAmelCase = (6, 12, 24, 48)
_lowerCAmelCase = 12
_lowerCAmelCase = 768
# set label information
_lowerCAmelCase = 150
_lowerCAmelCase = """huggingface/label-files"""
_lowerCAmelCase = """ade20k-id2label.json"""
_lowerCAmelCase = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
_lowerCAmelCase = SwinConfig(
embed_dim=__lowerCamelCase , depths=__lowerCamelCase , num_heads=__lowerCamelCase , window_size=__lowerCamelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
_lowerCAmelCase = UperNetConfig(
backbone_config=__lowerCamelCase , auxiliary_in_channels=__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , )
return config
def A (__lowerCamelCase :Dict ):
_lowerCAmelCase = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def A (__lowerCamelCase :Any , __lowerCamelCase :str , __lowerCamelCase :Optional[Any] ):
_lowerCAmelCase = dct.pop(__lowerCamelCase )
_lowerCAmelCase = val
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :List[str] ):
_lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCAmelCase = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
_lowerCAmelCase = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase = in_proj_weight[:dim, :]
_lowerCAmelCase = in_proj_bias[: dim]
_lowerCAmelCase = in_proj_weight[
dim : dim * 2, :
]
_lowerCAmelCase = in_proj_bias[
dim : dim * 2
]
_lowerCAmelCase = in_proj_weight[
-dim :, :
]
_lowerCAmelCase = in_proj_bias[-dim :]
# fmt: on
def A (__lowerCamelCase :List[Any] ):
_lowerCAmelCase , _lowerCAmelCase = x.shape
_lowerCAmelCase = x.reshape(__lowerCamelCase , 4 , in_channel // 4 )
_lowerCAmelCase = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(__lowerCamelCase , __lowerCamelCase )
return x
def A (__lowerCamelCase :Dict ):
_lowerCAmelCase , _lowerCAmelCase = x.shape
_lowerCAmelCase = x.reshape(__lowerCamelCase , in_channel // 4 , 4 )
_lowerCAmelCase = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(__lowerCamelCase , __lowerCamelCase )
return x
def A (__lowerCamelCase :Dict ):
_lowerCAmelCase = x.shape[0]
_lowerCAmelCase = x.reshape(4 , in_channel // 4 )
_lowerCAmelCase = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(__lowerCamelCase )
return x
def A (__lowerCamelCase :Any ):
_lowerCAmelCase = x.shape[0]
_lowerCAmelCase = x.reshape(in_channel // 4 , 4 )
_lowerCAmelCase = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(__lowerCamelCase )
return x
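# Hedged illustration of the [0, 2, 1, 3] permutation the four helpers above
# apply: the mmseg Swin checkpoints store the patch-merging channels in a
# different grouping than the HF implementation, so rows are regrouped before
# loading (that is my reading of the conversion; the arithmetic below is exact).
#
#   import torch
#   x = torch.arange(8.0)                                      # [0, 1, ..., 7]
#   y = x.reshape(4, 2)[[0, 2, 1, 3], :].transpose(0, 1).reshape(8)
#   # y == tensor([0., 4., 2., 6., 1., 5., 3., 7.])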
def A (__lowerCamelCase :List[Any] , __lowerCamelCase :Optional[int] , __lowerCamelCase :Union[str, Any] ):
_lowerCAmelCase = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
_lowerCAmelCase = model_name_to_url[model_name]
_lowerCAmelCase = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" , file_name=__lowerCamelCase )[
"""state_dict"""
]
for name, param in state_dict.items():
print(__lowerCamelCase , param.shape )
_lowerCAmelCase = get_upernet_config(__lowerCamelCase )
_lowerCAmelCase = UperNetForSemanticSegmentation(__lowerCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase = state_dict.pop(__lowerCamelCase )
if "bn" in key:
_lowerCAmelCase = key.replace("""bn""" , """batch_norm""" )
_lowerCAmelCase = val
# rename keys
_lowerCAmelCase = create_rename_keys(__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_lowerCAmelCase = reverse_correct_unfold_reduction_order(__lowerCamelCase )
if "norm" in key:
_lowerCAmelCase = reverse_correct_unfold_norm_order(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
# verify on image
_lowerCAmelCase = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
_lowerCAmelCase = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("""RGB""" )
_lowerCAmelCase = SegformerImageProcessor()
_lowerCAmelCase = processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_lowerCAmelCase = model(__lowerCamelCase )
_lowerCAmelCase = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_lowerCAmelCase = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
_lowerCAmelCase = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
_lowerCAmelCase = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
_lowerCAmelCase = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
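# Hedged CLI sketch for the converter above (the flag names come from the
# argparse definition below; the script filename is an assumption):
#
#   python convert_upernet_swin_checkpoint.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub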
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 364 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _lowercase , _lowercase ):
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
_lowerCAmelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_lowercase , scheduler=_lowercase )
@torch.no_grad()
def __call__( self , _lowercase = 1 , _lowercase = None , _lowercase = 0.0 , _lowercase = 50 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ):
"""simple docstring"""
if isinstance(self.unet.config.sample_size , _lowercase ):
_lowerCAmelCase = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_lowerCAmelCase = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
_lowerCAmelCase = randn_tensor(_lowercase , generator=_lowercase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowerCAmelCase = self.unet(_lowercase , _lowercase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
# do x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(
_lowercase , _lowercase , _lowercase , eta=_lowercase , use_clipped_model_output=_lowercase , generator=_lowercase ).prev_sample
_lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
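# Hedged usage sketch: this mirrors diffusers' DDIMPipeline; the checkpoint id
# below is illustrative, not something this file guarantees.
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]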
| 229 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
A__ : Optional[Any] = logging.getLogger(__name__)
def _snake_case ( ) -> int:
lowerCamelCase_ : Tuple =argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=lowerCamelCase__ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=lowerCamelCase__ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=lowerCamelCase__ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=lowerCamelCase__ , default="data/dump" , help="The dump file prefix." )
lowerCamelCase_ : Tuple =parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
lowerCamelCase_ : Tuple =BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ : Optional[Any] =tokenizer.special_tokens_map["cls_token"] # `[CLS]`
lowerCamelCase_ : Any =tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCamelCase_ : str =RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ : List[Any] =tokenizer.special_tokens_map["cls_token"] # `<s>`
lowerCamelCase_ : Any =tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCamelCase_ : Tuple =GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ : Dict =tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
lowerCamelCase_ : Any =tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
lowerCamelCase_ : Optional[int] =fp.readlines()
logger.info("Start encoding" )
logger.info(F"""{len(lowerCamelCase__ )} examples to process.""" )
lowerCamelCase_ : str =[]
lowerCamelCase_ : Union[str, Any] =0
lowerCamelCase_ : List[str] =10_000
lowerCamelCase_ : int =time.time()
for text in data:
lowerCamelCase_ : List[str] =F"""{bos} {text.strip()} {sep}"""
lowerCamelCase_ : str =tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
rslt.append(lowerCamelCase__ )
iter += 1
if iter % interval == 0:
lowerCamelCase_ : List[Any] =time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
lowerCamelCase_ : Tuple =time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(lowerCamelCase__ )} examples processed.""" )
lowerCamelCase_ : Optional[Any] =F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
lowerCamelCase_ : Optional[int] =tokenizer.vocab_size
if vocab_size < (1 << 16):
lowerCamelCase_ : int =[np.uintaa(lowerCamelCase__ ) for d in rslt]
else:
lowerCamelCase_ : Tuple =[np.intaa(lowerCamelCase__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(lowerCamelCase__ , "wb" ) as handle:
pickle.dump(rslt_ , lowerCamelCase__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
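# Hedged sketch: reading the dump produced above back into memory. The path is
# illustrative and follows the `--dump_file`/`--tokenizer_name` pattern used
# when the pickle is written.
#
#   import pickle
#   with open("data/dump.bert-base-uncased.pickle", "rb") as fh:
#       sequences = pickle.load(fh)   # list of uint16/int32 numpy arrays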
| 144 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 144 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__ : Dict =logging.get_logger(__name__)
A__ : Optional[int] ="""▁"""
A__ : Any ={"""vocab_file""": """sentencepiece.bpe.model"""}
A__ : List[Any] ={
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
A__ : Tuple ={
"""xlm-roberta-base""": 5_12,
"""xlm-roberta-large""": 5_12,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_12,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_12,
"""xlm-roberta-large-finetuned-conll03-english""": 5_12,
"""xlm-roberta-large-finetuned-conll03-german""": 5_12,
}
class UpperCAmelCase ( lowerCAmelCase_ ):
_lowercase: List[Any] = VOCAB_FILES_NAMES
_lowercase: str = PRETRAINED_VOCAB_FILES_MAP
_lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase: Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : int , __snake_case : List[str] , __snake_case : int="<s>" , __snake_case : List[str]="</s>" , __snake_case : Union[str, Any]="</s>" , __snake_case : List[str]="<s>" , __snake_case : Optional[Any]="<unk>" , __snake_case : Tuple="<pad>" , __snake_case : Optional[Any]="<mask>" , __snake_case : Optional[Any] = None , **__snake_case : List[str] , ) -> str:
_lowerCAmelCase = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
_lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase = 1
_lowerCAmelCase = len(self.sp_model ) + self.fairseq_offset
_lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ) -> int:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
_lowerCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[int] , __snake_case : Union[str, Any] ) -> Dict:
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self : Tuple , __snake_case : Union[str, Any] , __snake_case : List[str] = None ) -> List[Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
_lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : List[str] , __snake_case : Dict , __snake_case : Any = None , __snake_case : List[str] = False ) -> Tuple:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def lowercase__ ( self : Tuple , __snake_case : str , __snake_case : str = None ) -> str:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self : str ) -> Optional[int]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowercase__ ( self : str ) -> Optional[int]:
_lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : Optional[int] , __snake_case : List[Any] ) -> str:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def lowercase__ ( self : Optional[Any] , __snake_case : Any ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self : List[str] , __snake_case : Tuple ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self : Tuple , __snake_case : List[str] ) -> Optional[Any]:
_lowerCAmelCase = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def lowercase__ ( self : List[Any] , __snake_case : Tuple , __snake_case : List[Any] = None ) -> Optional[int]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
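# Hedged usage sketch: assuming this class mirrors transformers'
# XLMRobertaTokenizer, typical use looks like the following (the ids follow the
# fairseq alignment table above: <s>=0, </s>=2).
#
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tok("Hello world")["input_ids"]   # starts with 0 (<s>), ends with 2 (</s>)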
| 352 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> Dict:
_lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowerCAmelCase = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
_lowerCAmelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowerCAmelCase = {"""unk_token""": """<unk>"""}
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__snake_case ) )
_lowerCAmelCase = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"""image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_lowerCAmelCase = os.path.join(self.tmpdirname , __snake_case )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__snake_case , __snake_case )
def lowercase__ ( self : Optional[int] , **__snake_case : Dict ) -> List[str]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase__ ( self : List[str] , **__snake_case : Any ) -> List[Any]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase__ ( self : Tuple , **__snake_case : List[str] ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase__ ( self : str ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : str ) -> List[Any]:
_lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_lowerCAmelCase = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Optional[Any] ) -> List[str]:
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__snake_case )
_lowerCAmelCase = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __snake_case )
self.assertIsInstance(processor_fast.tokenizer , __snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __snake_case )
self.assertIsInstance(processor_fast.image_processor , __snake_case )
def lowercase__ ( self : Optional[int] ) -> Dict:
_lowerCAmelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
_lowerCAmelCase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def lowercase__ ( self : Tuple ) -> List[Any]:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(__snake_case , return_tensors="""np""" )
_lowerCAmelCase = processor(images=__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Any ) -> str:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = processor(text=__snake_case )
_lowerCAmelCase = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : str ) -> Union[str, Any]:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=__snake_case , images=__snake_case )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__snake_case ):
processor()
def lowercase__ ( self : Union[str, Any] ) -> Dict:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(images=__snake_case , visual_prompt=__snake_case )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__snake_case ):
processor()
def lowercase__ ( self : int ) -> Tuple:
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=__snake_case , image_processor=__snake_case )
_lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase = processor.batch_decode(__snake_case )
_lowerCAmelCase = tokenizer.batch_decode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
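# For context, a minimal usage sketch of the processor these tests exercise.
# The checkpoint name is an assumption for illustration, not taken from the tests.
from PIL import Image
import numpy as np
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=[image], return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']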
| 220 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not be greater than number_of_bytes!' )
UpperCAmelCase : int = number_of_bytes // partitions
UpperCAmelCase : List[str] = []
for i in range(UpperCAmelCase_ ):
UpperCAmelCase : List[Any] = i * bytes_per_partition + 1
UpperCAmelCase : str = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
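# A readable restatement of the partition helper above with a quick check; the
# name allocation_num is an assumption, since the identifiers above are obfuscated.
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # The last partition absorbs any remainder left by the integer division.
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list

print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']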
| 151 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : Dict = len(UpperCAmelCase_ ) # No of vertices in graph
UpperCAmelCase : Tuple = [0] * n
UpperCAmelCase : List[Any] = [False] * n
def dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : List[str] = True
UpperCAmelCase : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , id_ )
UpperCAmelCase : Optional[Any] = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
UpperCAmelCase : Dict = min(low[at] , low[to] )
UpperCAmelCase : list[tuple[int, int]] = []
for i in range(UpperCAmelCase_ ):
if not visited[i]:
dfs(UpperCAmelCase_ , -1 , UpperCAmelCase_ , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
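# For comparison, a compact self-contained variant of the bridge finder that keeps
# a separate discovery-order array (the version above folds it into `low`),
# checked against the first demo graph.
def find_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    n = len(graph)
    visited = [False] * n
    order = [0] * n  # discovery time of each vertex
    low = [0] * n    # lowest discovery time reachable from the subtree
    bridges: list[tuple[int, int]] = []
    timer = 0

    def dfs(at: int, parent: int) -> None:
        nonlocal timer
        visited[at] = True
        order[at] = low[at] = timer
        timer += 1
        for to in graph[at]:
            if to == parent:
                continue
            if visited[to]:
                low[at] = min(low[at], order[to])  # back edge
            else:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if low[to] > order[at]:  # no back edge past (at, to): a bridge
                    bridges.append((at, to) if at < to else (to, at))

    for i in range(n):
        if not visited[i]:
            dfs(i, -1)
    return bridges

demo = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
        5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
print(sorted(find_bridges(demo)))  # [(2, 3), (2, 5), (3, 4)]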
| 151 | 1 |
'''simple docstring'''
from math import factorial
def a__ ( lowercase : int, lowercase : int, lowercase : float ) -> float:
"""simple docstring"""
if successes > trials:
raise ValueError('''successes must be lower than or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(lowercase, lowercase ) or not isinstance(lowercase, lowercase ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
raise ValueError('''prob has to be in range of 1 - 0''' )
_UpperCamelCase = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_UpperCamelCase = float(factorial(lowercase ) )
coefficient /= factorial(lowercase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
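# A hand check of the value printed above:
# P(X = 2) = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
from math import comb

print(comb(4, 2) * 0.75**2 * 0.25**2)  # 0.2109375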
| 287 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase__ : Any = logging.getLogger(__name__)
def a__ ( lowercase : Optional[Any], lowercase : Tuple ) -> Any:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
_snake_case : str = field(metadata={'help': 'Should contain the data files for the task.'} )
_snake_case : int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_snake_case : bool = field(
default=__magic_name__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', lowercase )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(lowercase )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=lowercase, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=lowercase, cache_dir=model_args.cache_dir, )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=lowercase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=lowercase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(lowercase : EvalPrediction ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(lowercase, p.label_ids )}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(lowercase, pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=lowercase, args=lowercase, train_dataset=lowercase, eval_dataset=lowercase, compute_metrics=lowercase, data_collator=lowercase, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_master():
with open(lowercase, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', lowercase, lowercase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(lowercase )
return results
def a__ ( lowercase : Tuple ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
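# A sketch of how HfArgumentParser populates the dataclasses above from CLI flags,
# shown with TrainingArguments alone for brevity; a real run would also include the
# ModelArguments and DataTrainingArguments classes, and the flag values here are
# illustrative assumptions.
from transformers import HfArgumentParser, TrainingArguments

parser = HfArgumentParser(TrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(
    args=["--output_dir", "./out", "--do_train", "--per_device_train_batch_size", "8"]
)
print(training_args.do_train, training_args.per_device_train_batch_size)  # True 8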
| 287 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def UpperCAmelCase__ (lowerCAmelCase_ = 8 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ascii_letters + digits + punctuation
return "".join(secrets.choice(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
i -= len(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = i // 3
__SCREAMING_SNAKE_CASE = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
__SCREAMING_SNAKE_CASE = (
chars_incl
+ random(lowerCAmelCase_ , quotient + remainder )
+ random(lowerCAmelCase_ , lowerCAmelCase_ )
+ random(lowerCAmelCase_ , lowerCAmelCase_ )
)
__SCREAMING_SNAKE_CASE = list(lowerCAmelCase_ )
shuffle(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
# random is a generalised function for letters, characters and numbers
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return "".join(secrets.choice(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
pass # Put your code here...
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
pass # Put your code here...
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
pass # Put your code here...
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 8 ):
'''simple docstring'''
if len(lowerCAmelCase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
__SCREAMING_SNAKE_CASE = any(char in ascii_uppercase for char in password )
__SCREAMING_SNAKE_CASE = any(char in ascii_lowercase for char in password )
__SCREAMING_SNAKE_CASE = any(char in digits for char in password )
__SCREAMING_SNAKE_CASE = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = int(input("Please indicate the max length of your password: " ).strip() )
__SCREAMING_SNAKE_CASE = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(lowerCAmelCase_ ) )
print(
"Alternative Password generated:" , alternative_password_generator(lowerCAmelCase_ , lowerCAmelCase_ ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
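# A self-contained restatement of the strength rule encoded above (length >= 8 plus
# at least one character from each class); is_strong_password is an assumed readable
# name for the obfuscated checker.
from string import ascii_lowercase, ascii_uppercase, digits, punctuation

def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        return False
    return (
        any(char in ascii_uppercase for char in password)
        and any(char in ascii_lowercase for char in password)
        and any(char in digits for char in password)
        and any(char in punctuation for char in password)
    )

print(is_strong_password("Hunter2!"))  # True
print(is_strong_password("password"))  # False: no uppercase, digit, or punctuation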
| 54 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
a__ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , **UpperCAmelCase__ : List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = feature_size
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = kwargs.pop("padding_side" , "right" )
__SCREAMING_SNAKE_CASE = kwargs.pop("return_attention_mask" , UpperCAmelCase__ )
super().__init__(**UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = True , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(UpperCAmelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__SCREAMING_SNAKE_CASE = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
__SCREAMING_SNAKE_CASE = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(UpperCAmelCase__ ) == 0:
if return_attention_mask:
__SCREAMING_SNAKE_CASE = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__SCREAMING_SNAKE_CASE = required_input[0]
if isinstance(UpperCAmelCase__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__SCREAMING_SNAKE_CASE = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = "tf"
elif is_torch_tensor(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = "pt"
elif isinstance(UpperCAmelCase__ , (int, float, list, tuple, np.ndarray) ):
__SCREAMING_SNAKE_CASE = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(UpperCAmelCase__ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__SCREAMING_SNAKE_CASE = to_numpy(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = [to_numpy(UpperCAmelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
__SCREAMING_SNAKE_CASE = self._get_padding_strategies(padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
if not all(len(UpperCAmelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__SCREAMING_SNAKE_CASE = []
for i in range(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = {k: v[i] for k, v in processed_features.items()}
# truncation
__SCREAMING_SNAKE_CASE = self._truncate(
UpperCAmelCase__ , max_length=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , truncation=UpperCAmelCase__ , )
truncated_inputs.append(UpperCAmelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__SCREAMING_SNAKE_CASE = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH
__SCREAMING_SNAKE_CASE = {}
for i in range(UpperCAmelCase__ ):
# padding
__SCREAMING_SNAKE_CASE = self._pad(
truncated_inputs[i] , max_length=UpperCAmelCase__ , padding_strategy=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
__SCREAMING_SNAKE_CASE = []
if value.dtype is np.dtype(np.float64 ):
__SCREAMING_SNAKE_CASE = value.astype(np.float32 )
batch_outputs[key].append(UpperCAmelCase__ )
return BatchFeature(UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> dict:
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__SCREAMING_SNAKE_CASE = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCAmelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__SCREAMING_SNAKE_CASE = np.ones(len(UpperCAmelCase__ ) , dtype=np.int32 )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE = max_length - len(UpperCAmelCase__ )
if self.padding_side == "right":
if return_attention_mask:
__SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (0, difference) )
__SCREAMING_SNAKE_CASE = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__SCREAMING_SNAKE_CASE = np.pad(
UpperCAmelCase__ , UpperCAmelCase__ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__SCREAMING_SNAKE_CASE = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__SCREAMING_SNAKE_CASE = np.pad(
UpperCAmelCase__ , UpperCAmelCase__ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> str:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) > max_length
if needs_to_be_truncated:
__SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__SCREAMING_SNAKE_CASE = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Optional[int]=None ) -> str:
# Get padding strategy
if padding is not False:
if padding is True:
__SCREAMING_SNAKE_CASE = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = PaddingStrategy(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = padding
else:
__SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
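# A minimal sketch of the padding path above, using Wav2Vec2FeatureExtractor as a
# concrete subclass of this mixin; the parameter values are illustrative.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(
    feature_size=1, sampling_rate=16000, padding_value=0.0, return_attention_mask=True
)
features = {"input_values": [np.ones(8, dtype=np.float32), np.ones(5, dtype=np.float32)]}
padded = extractor.pad(features, padding="longest", return_tensors="np")
print(padded["input_values"].shape)      # (2, 8): shorter input padded with 0.0
print(padded["attention_mask"].sum(-1))  # [8 5]: real lengths survive in the mask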
| 54 | 1 |
'''simple docstring'''
import random
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_, A_, A_ : Optional[Any] = [], [], []
for element in data:
if element < pivot:
less.append(snake_case_ )
elif element > pivot:
greater.append(snake_case_ )
else:
equal.append(snake_case_ )
return less, equal, greater
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if index >= len(snake_case_ ) or index < 0:
return None
A_ : int = items[random.randint(0 , len(snake_case_ ) - 1 )]
A_ : str = 0
A_, A_, A_ : List[Any] = _partition(snake_case_ , snake_case_ )
A_ : int = len(snake_case_ )
A_ : Any = len(snake_case_ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(snake_case_ , snake_case_ )
# must be in larger
else:
return quick_select(snake_case_ , index - (m + count) )
| 368 |
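# A self-contained restatement of the quickselect above with readable names; like
# the original, it returns None for an out-of-range index and returns the pivot
# when the index falls inside the block of elements equal to it.
from __future__ import annotations

import random

def quick_select(items: list[int], index: int) -> int | None:
    if index < 0 or index >= len(items):
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller = [x for x in items if x < pivot]
    larger = [x for x in items if x > pivot]
    count = len(items) - len(smaller) - len(larger)  # elements equal to the pivot
    if len(smaller) <= index < len(smaller) + count:
        return pivot
    if index < len(smaller):
        return quick_select(smaller, index)
    return quick_select(larger, index - len(smaller) - count)

print(quick_select([9, 1, 7, 3, 5], 2))  # 5, the median of the five items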
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
lowerCamelCase :Optional[int] = logging.getLogger(__name__)
lowerCamelCase :Optional[int] = tf.data.AUTOTUNE
def a ( ):
'''simple docstring'''
A_ : Any = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowerCamelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowerCamelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowerCamelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowerCamelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowerCamelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowerCamelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowerCamelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowerCamelCase__ , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowerCamelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCamelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowerCamelCase__ , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowerCamelCase__ , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowerCamelCase__ , default=5_12 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowerCamelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowerCamelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
A_ : Dict = parser.parse_args()
return args
def a ( lowerCamelCase__ ):
'''simple docstring'''
try:
if args.tpu_name:
A_ : Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
A_ : Union[str, Any] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowerCamelCase__ )
tf.tpu.experimental.initialize_tpu_system(lowerCamelCase__ )
return tpu
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = 0
for file in file_list:
A_ : Optional[Any] = file.split("""/""" )[-1]
A_ : Optional[int] = re.search(r"""-\d+-(\d+)\.tfrecord""" , lowerCamelCase__ ).group(1 )
A_ : Any = int(lowerCamelCase__ )
num_samples += sample_count
return num_samples
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
'''simple docstring'''
A_ : List[Any] = count_samples(lowerCamelCase__ )
A_ : List[str] = tf.data.Dataset.from_tensor_slices(lowerCamelCase__ )
if shuffle:
A_ : Optional[int] = dataset.shuffle(len(lowerCamelCase__ ) )
A_ : Tuple = tf.data.TFRecordDataset(lowerCamelCase__ , num_parallel_reads=lowerCamelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A_ : Union[str, Any] = dataset.apply(tf.data.experimental.assert_cardinality(lowerCamelCase__ ) )
A_ : Any = dataset.map(lowerCamelCase__ , num_parallel_calls=lowerCamelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
A_ : List[str] = dataset.shuffle(args.shuffle_buffer_size )
A_ : str = dataset.batch(lowerCamelCase__ , drop_remainder=lowerCamelCase__ )
A_ : str = dataset.map(lowerCamelCase__ , num_parallel_calls=lowerCamelCase__ )
A_ : Optional[int] = dataset.prefetch(lowerCamelCase__ )
return dataset
def a ( lowerCamelCase__ ):
'''simple docstring'''
if not args.no_tpu:
A_ : Dict = initialize_tpu(lowerCamelCase__ )
A_ : str = tf.distribute.TPUStrategy(lowerCamelCase__ )
else:
A_ : Union[str, Any] = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer )
A_ : str = AutoConfig.from_pretrained(args.pretrained_model_config )
A_ : Optional[int] = tokenizer.vocab_size
A_ : Dict = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
A_ : Optional[int] = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
A_ : Dict = count_samples(lowerCamelCase__ )
A_ : Optional[Any] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A_ : Dict = steps_per_epoch * args.num_epochs
with strategy.scope():
A_ : Optional[int] = TFAutoModelForMaskedLM.from_config(lowerCamelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A_, A_ : Any = create_optimizer(
num_train_steps=lowerCamelCase__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCamelCase__ , metrics=["""accuracy"""] )
def decode_fn(lowerCamelCase__ ):
A_ : Optional[int] = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCamelCase__ , lowerCamelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A_ : Optional[int] = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCamelCase__ , return_tensors="""tf""" )
def mask_with_collator(lowerCamelCase__ ):
# TF really needs an isin() function
A_ : str = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
A_, A_ : Union[str, Any] = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowerCamelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCamelCase__ , )
return batch
A_ : str = args.per_replica_batch_size * strategy.num_replicas_in_sync
A_ : Tuple = prepare_dataset(
lowerCamelCase__ , decode_fn=lowerCamelCase__ , mask_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ , shuffle=lowerCamelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
A_ : Optional[Any] = prepare_dataset(
lowerCamelCase__ , decode_fn=lowerCamelCase__ , mask_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ , shuffle=lowerCamelCase__ , )
A_ : Union[str, Any] = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCamelCase__ ) )
model.fit(
lowerCamelCase__ , validation_data=lowerCamelCase__ , epochs=args.num_epochs , callbacks=lowerCamelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
lowerCamelCase :Tuple = parse_args()
main(args)
| 135 | 0 |
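# A small check of the shard-naming convention the count_samples helper above relies
# on: the trailing number in "<name>-<shard>-<count>.tfrecord" is the record count.
# The shard names here are made up for illustration.
import re

def count_samples(file_list: list[str]) -> int:
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        num_samples += int(sample_count)
    return num_samples

shards = ["gs://bucket/train-00000-5000.tfrecord", "gs://bucket/train-00001-4200.tfrecord"]
print(count_samples(shards))  # 9200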