import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
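# Illustrative usage (added for clarity, not part of the original module): with drop_prob=0.5
# in training mode roughly half of the samples in the batch are zeroed out entirely and the
# survivors are rescaled by 1 / keep_prob, so the output matches the input in expectation; in
# eval mode the function is the identity.
#
#     x = torch.ones(8, 16, 7, 7)
#     y = drop_path(x, drop_prob=0.5, training=True)   # each y[i] is all 0.0 or all 2.0
#     z = drop_path(x, drop_prob=0.5, training=False)  # identical to x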
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a , _a , _a=None ) -> List[str]:
super().__init__()
_A : int = patch_size if isinstance(_a , collections.abc.Iterable ) else (patch_size, patch_size)
_A : Optional[Any] = stride if isinstance(_a , collections.abc.Iterable ) else (stride, stride)
_A : Dict = padding if isinstance(_a , collections.abc.Iterable ) else (padding, padding)
_A : Optional[Any] = nn.Conv2d(_a , _a , kernel_size=_a , stride=_a , padding=_a )
_A : str = norm_layer(_a ) if norm_layer else nn.Identity()
def a__ ( self , _a ) -> List[Any]:
_A : Any = self.projection(_a )
_A : List[Any] = self.norm(_a )
return embeddings
class lowercase ( nn.GroupNorm ):
def __init__( self , _a , **_a ) -> List[Any]:
super().__init__(1 , _a , **_a )
class lowercase ( nn.Module ):
def __init__( self , _a ) -> int:
super().__init__()
_A : Union[str, Any] = nn.AvgPool2d(_a , stride=1 , padding=pool_size // 2 , count_include_pad=_a )
def a__ ( self , _a ) -> Tuple:
return self.pool(_a ) - hidden_states
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a ) -> int:
super().__init__()
_A : Tuple = nn.Conv2d(_a , _a , 1 )
_A : Tuple = nn.Conv2d(_a , _a , 1 )
_A : Any = PoolFormerDropPath(_a )
if isinstance(config.hidden_act , _a ):
_A : Optional[int] = ACT2FN[config.hidden_act]
else:
_A : Optional[Any] = config.hidden_act
def a__ ( self , _a ) -> Any:
_A : Tuple = self.conva(_a )
_A : int = self.act_fn(_a )
_A : List[str] = self.drop(_a )
_A : Union[str, Any] = self.conva(_a )
_A : Optional[int] = self.drop(_a )
return hidden_states
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]:
super().__init__()
_A : List[str] = PoolFormerPooling(_a )
_A : int = PoolFormerOutput(_a , _a , _a , _a )
_A : Optional[Any] = PoolFormerGroupNorm(_a )
_A : Optional[Any] = PoolFormerGroupNorm(_a )
# Useful for training neural nets
_A : List[Any] = PoolFormerDropPath(_a ) if drop_path > 0.0 else nn.Identity()
_A : List[str] = config.use_layer_scale
if config.use_layer_scale:
_A : Tuple = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) , requires_grad=_a )
_A : Tuple = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) , requires_grad=_a )
def a__ ( self , _a ) -> Union[str, Any]:
if self.use_layer_scale:
_A : Optional[Any] = self.pooling(self.before_norm(_a ) )
_A : Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_A : Union[str, Any] = hidden_states + self.drop_path(_a )
_A : List[Any] = ()
_A : List[str] = self.output(self.after_norm(_a ) )
_A : Optional[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_A : List[Any] = hidden_states + self.drop_path(_a )
_A : Union[str, Any] = (output,) + outputs
return outputs
else:
_A : int = self.drop_path(self.pooling(self.before_norm(_a ) ) )
# First residual connection
_A : int = pooling_output + hidden_states
_A : Dict = ()
# Second residual connection inside the PoolFormerOutput block
_A : int = self.drop_path(self.output(self.after_norm(_a ) ) )
_A : Union[str, Any] = hidden_states + layer_output
_A : Dict = (output,) + outputs
return outputs
class lowercase ( nn.Module ):
def __init__( self , _a ) -> Optional[int]:
super().__init__()
_A : Union[str, Any] = config
# stochastic depth decay rule
_A : Union[str, Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_A : Optional[Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_A : Optional[int] = nn.ModuleList(_a )
# Transformer blocks
_A : Optional[Any] = []
_A : Tuple = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_A : List[Any] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_a ) )
_A : List[Any] = nn.ModuleList(_a )
def a__ ( self , _a , _a=False , _a=True ) -> Tuple:
_A : Tuple = () if output_hidden_states else None
_A : List[str] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_A : Union[str, Any] = layers
# Get patch embeddings from hidden_states
_A : str = embedding_layer(_a )
# Send the embeddings through the blocks
for _, blk in enumerate(_a ):
_A : Optional[Any] = blk(_a )
_A : List[str] = layer_outputs[0]
if output_hidden_states:
_A : Any = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_a , hidden_states=_a )
class lowercase ( UpperCamelCase__ ):
_a = PoolFormerConfig
_a = "poolformer"
_a = "pixel_values"
_a = True
def a__ ( self , _a ) -> Tuple:
if isinstance(_a , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def a__ ( self , _a , _a=False ) -> Union[str, Any]:
if isinstance(_a , _a ):
_A : List[str] = value
_snake_case = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> List[str]:
super().__init__(_a )
_A : Any = config
_A : Union[str, Any] = PoolFormerEncoder(_a )
# Initialize weights and apply final processing
self.post_init()
def a__ ( self ) -> str:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a__ ( self , _a = None , _a = None , _a = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
_A : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_A : Any = self.encoder(
_a , output_hidden_states=_a , return_dict=_a , )
_A : Optional[int] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_a , hidden_states=encoder_outputs.hidden_states , )
class lowercase ( nn.Module ):
def __init__( self , _a ) -> Tuple:
super().__init__()
_A : List[str] = nn.Linear(config.hidden_size , config.hidden_size )
def a__ ( self , _a ) -> Tuple:
_A : str = self.dense(_a )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> List[Any]:
super().__init__(_a )
_A : Tuple = config.num_labels
_A : Optional[Any] = PoolFormerModel(_a )
# Final norm
_A : Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_A : str = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a__ ( self , _a = None , _a = None , _a = None , _a = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
_A : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_A : Optional[Any] = self.poolformer(
_a , output_hidden_states=_a , return_dict=_a , )
_A : Tuple = outputs[0]
_A : Any = self.classifier(self.norm(_a ).mean([-2, -1] ) )
_A : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_A : Union[str, Any] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_A : Optional[int] = """single_label_classification"""
else:
_A : List[Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
_A : Union[str, Any] = MSELoss()
if self.num_labels == 1:
_A : str = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_A : Optional[Any] = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
_A : List[str] = CrossEntropyLoss()
_A : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_A : List[Any] = BCEWithLogitsLoss()
_A : Dict = loss_fct(_a , _a )
if not return_dict:
_A : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a , logits=_a , hidden_states=outputs.hidden_states )
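# Hedged usage sketch (added; the checkpoint name comes from the docstring constants above, and
# the public class/processor names are assumed to follow the usual transformers API for this model):
#
#     from transformers import AutoImageProcessor, PoolFormerForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL image
#     logits = model(**inputs).logits                        # shape (batch_size, num_labels)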
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
self.assertTrue(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
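# Example behaviour of the validator above (illustrative, not part of the original script):
#     is_ip_v4_address_valid("192.168.0.23")  # -> True
#     is_ip_v4_address_valid("256.1.2.3")     # -> False (octet out of range)
#     is_ip_v4_address_valid("192.168.01")    # -> False (only three octets)
#     is_ip_v4_address_valid("1.2.-3.4")      # -> False ("-3" fails isdigit(), so only three octets remain)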
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_a , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(_a , _a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
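# Illustrative behaviour (added note; assumes the standard transformers lazy-import pattern):
# importing this package stays cheap, and names listed in _import_structure are only resolved
# on first attribute access, e.g.
#     from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
# imports configuration_time_series_transformer at that point. When torch is unavailable, the
# modeling classes are simply not registered in _import_structure above.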
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    queue: list[list] = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue is filled like a priority queue
    # heapq implements a min priority queue, so -1 * len(v) is used as the rank
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
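# For the sample graph above, the greedy loop should pick vertices in the order 2, 0, 1, 4
# (ties are broken by the heap comparing the (key, adjacency-list) tuples), so the expected
# output is:
#     Minimum vertex cover:
#     {0, 1, 2, 4}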
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22Img2ImgPipeline,
KandinskyV22PriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyV22Img2ImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNet2DConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uint8(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyV22PriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
pipe_prior.to(_a )
_A : int = KandinskyV22Img2ImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.float16 )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PriorTransformer
_a = "hidden_states"
@property
def a__ ( self ) -> str:
_A : Optional[Any] = 4
_A : List[str] = 8
_A : Optional[Any] = 7
_A : Union[str, Any] = floats_tensor((batch_size, embedding_dim) ).to(_a )
_A : Tuple = floats_tensor((batch_size, embedding_dim) ).to(_a )
_A : Any = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_a )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def a__ ( self , _a=0 ) -> Optional[Any]:
torch.manual_seed(_a )
_A : Dict = 4
_A : str = 8
_A : Optional[Any] = 7
_A : Any = torch.randn((batch_size, embedding_dim) ).to(_a )
_A : Tuple = torch.randn((batch_size, embedding_dim) ).to(_a )
_A : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_a )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def a__ ( self ) -> Union[str, Any]:
return (4, 8)
@property
def a__ ( self ) -> int:
return (4, 8)
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
_A : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def a__ ( self ) -> Union[str, Any]:
_A : Dict = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""" , output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(_a )
_A : Optional[int] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def a__ ( self ) -> Any:
_A : List[Any] = self.prepare_init_args_and_inputs_for_common()
_A : Optional[int] = self.model_class(**_a )
_A : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : Optional[int] = [*signature.parameters.keys()]
_A : List[Any] = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2] , _a )
def a__ ( self ) -> Any:
_A : Union[str, Any] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
_A : Optional[int] = model.to(_a )
if hasattr(_a , """set_default_attn_processor""" ):
model.set_default_attn_processor()
_A : Tuple = self.get_dummy_seed_input()
with torch.no_grad():
_A : Dict = model(**_a )[0]
_A : List[str] = output[0, :5].flatten().cpu()
print(_a )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
_A : Optional[int] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(_a , _a , rtol=1e-2 ) )
@slow
class lowercase ( unittest.TestCase ):
def a__ ( self , _a=1 , _a=768 , _a=77 , _a=0 ) -> List[str]:
torch.manual_seed(_a )
_A : List[str] = batch_size
_A : Optional[int] = embedding_dim
_A : List[str] = num_embeddings
_A : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(_a )
_A : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(_a )
_A : Dict = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_a )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def a__ ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def a__ ( self , _a , _a ) -> Any:
_A : List[Any] = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(_a )
_A : Optional[Any] = self.get_dummy_seed_input(seed=_a )
with torch.no_grad():
_A : Dict = model(**_a )[0]
assert list(sample.shape ) == [1, 768]
_A : Optional[Any] = sample[0, :8].flatten().cpu()
print(_a )
_A : Tuple = torch.tensor(_a )
assert torch_all_close(_a , _a , atol=1e-3 )
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
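# Reasoning behind the loops above (added explanation): write the arithmetic progression as
# x = a + d, y = a, z = a - d with a = first_term. Then
#     x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = 4*a*d - a**2 = n,
# so n must be a multiple of a (hence the inner range over multiples of first_term) and
# d = (first_term + n / first_term) / 4, which is why that quantity has to be divisible by 4.
# Requiring z > 0 and n > 0 gives d < a < 4*d, the final filter before counting.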
if __name__ == "__main__":
print(f"""{solution() = }""")
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case : List[Any] = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
_snake_case : Dict = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
_snake_case : Union[str, Any] = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
_snake_case : List[Any] = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
_snake_case : int = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
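# Illustrative walk-through (the TF variable name below is hypothetical, not taken from a real
# checkpoint): applying DECODER_PATTERNS in order to
#     "pegasus/decoder/layer_0/attention/self/query/kernel"
# successively rewrites "/" -> ".", "layer_" -> "layers.", "kernel" -> "weight",
# "pegasus" -> "model", "attention.self" -> "self_attn" and "query" -> "q_proj", giving
#     "model.decoder.layers.0.self_attn.q_proj.weight"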
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
# separating decoder weights
_A : Any = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
_A : Union[str, Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items(),"""tf -> hf conversion""" ):
_A : Tuple = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
_A : Optional[int] = DECODER_PATTERNS
_A : List[Any] = rename_state_dict_key(snake_case_,snake_case_ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
_A : List[Any] = v.T
_A : Dict = torch.from_numpy(snake_case_ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(),"""tf -> hf conversion""" ):
_A : Dict = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE]
if any(snake_case_ ):
continue
_A : Any = REMAINING_PATTERNS
_A : List[Any] = rename_state_dict_key(snake_case_,snake_case_ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
_A : List[Any] = v.T
_A : Tuple = torch.from_numpy(snake_case_ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
_A : List[Any] = mapping["""model.embed_positions.weight"""]
_A : List[str] = mapping.pop("""model.embed_positions.weight""" )
_A : Dict = torch_model.load_state_dict(snake_case_,strict=snake_case_ )
_A : List[str] = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_snake_case : Tuple = parser.parse_args()
_snake_case : Union[str, Any] = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
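# Example invocation (the script filename and paths are placeholders, not taken from this file):
#     python convert_bigbird_pegasus_tf_to_pytorch.py \
#         --tf_ckpt_path /path/to/bigbird_pegasus_tf_checkpoint \
#         --save_dir ./bigbird-pegasus-converted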
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPT2Tokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( UpperCamelCase__ ):
_a = 4_2
_a = 4_2
def __init__( self , _a , _a ) -> Any:
super().__init__()
self.register_modules(unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = 1 , _a = 2000 , _a = None , _a = "pil" , _a = True , **_a , ) -> Union[ImagePipelineOutput, Tuple]:
_A : List[Any] = self.unet.config.sample_size
_A : Any = (batch_size, 3, img_size, img_size)
_A : Dict = self.unet
_A : Any = randn_tensor(_a , generator=_a ) * self.scheduler.init_noise_sigma
_A : List[str] = sample.to(self.device )
self.scheduler.set_timesteps(_a )
self.scheduler.set_sigmas(_a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_A : int = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
_A : Tuple = self.unet(_a , _a ).sample
_A : int = self.scheduler.step_correct(_a , _a , generator=_a ).prev_sample
# prediction step
_A : Optional[int] = model(_a , _a ).sample
_A : Optional[Any] = self.scheduler.step_pred(_a , _a , _a , generator=_a )
_A : Tuple = output.prev_sample, output.prev_sample_mean
_A : Tuple = sample_mean.clamp(0 , 1 )
_A : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A : Any = self.numpy_to_pil(_a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_a )
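# Note added for clarity (not part of the original pipeline file): each sampling step above
# runs `correct_steps` Langevin-style correction updates followed by one reverse-SDE
# prediction step, clamps the final sample to [0, 1] and optionally converts it to PIL.
# A hedged usage sketch, assuming the upstream diffusers argument names (batch_size,
# num_inference_steps, generator, output_type, return_dict) and a compatible Hub checkpoint
# such as "google/ncsnpp-celebahq-256" (the checkpoint name is an assumption; adjust as needed):
#   pipe = DiffusionPipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=2000).images[0]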
| 718
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "roberta"
def __init__( self , _a=5_0265 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , **_a , ) -> List[str]:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Dict = vocab_size
_A : str = hidden_size
_A : List[Any] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Union[str, Any] = hidden_act
_A : List[str] = intermediate_size
_A : int = hidden_dropout_prob
_A : List[str] = attention_probs_dropout_prob
_A : List[Any] = max_position_embeddings
_A : Union[str, Any] = type_vocab_size
_A : Optional[Any] = initializer_range
_A : int = layer_norm_eps
_A : List[str] = position_embedding_type
_A : List[Any] = use_cache
_A : Dict = classifier_dropout
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
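# Note added for clarity (not part of the original config file): the mapping above declares
# which ONNX input axes stay symbolic at export time -- input_ids and attention_mask are
# exported with dynamic (batch, sequence) axes, and the "multiple-choice" task inserts an
# extra dynamic "choice" axis between batch and sequence.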
| 719
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
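# Note added for clarity (not part of the original test file): as the cases above exercise,
# Dataset.from_list takes its column set from the first record (missing keys become None in
# later rows) while feature types can still be refined by later records, e.g. an empty list
# followed by [1, 2] is typed as Sequence(Value("int64")).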
| 54
| 0
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def a__ ( *_a , **_a ) -> Union[str, Any]:
pass
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def a__ ( self , _a , _a , _a ) -> Tuple:
_A : Any = DepthEstimationPipeline(model=_a , image_processor=_a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def a__ ( self , _a , _a ) -> str:
_A : List[Any] = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , _a )
import datasets
_A : int = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
_A : str = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , _a , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def a__ ( self ) -> str:
pass
@slow
@require_torch
def a__ ( self ) -> Optional[Any]:
_A : Tuple = """Intel/dpt-large"""
_A : Dict = pipeline("""depth-estimation""" , model=_a )
_A : Any = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
_A : Dict = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def a__ ( self ) -> int:
# It is highly irregular to have no small tests.
self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
| 720
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = list(snake_case_ )
_A : List[Any] = list(snake_case_ )
_A : Tuple = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
while True:
_A : int = ["""$"""] * len(snake_case_ )
_A : Any = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1,len(snake_case_ ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
_A : Dict = list(set(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = []
for minterm in minterms:
_A : Tuple = """"""
for _ in range(snake_case_ ):
_A : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = list(snake_case_ )
_A : Tuple = list(snake_case_ )
_A : Dict = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = []
_A : str = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
for i in range(len(snake_case_ ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
_A : Optional[int] = 0
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i],binary[j],snake_case_ ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
float(snake_case_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_A : int = decimal_to_binary(snake_case_,snake_case_ )
_A : Optional[Any] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
_A : int = prime_implicant_chart(snake_case_,snake_case_ )
_A : int = selection(snake_case_,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
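# Note added for clarity (not part of the original script): this is a Quine-McCluskey style
# minimiser -- the minterms are rendered as fixed-width binary strings, repeatedly merged
# into prime implicants (with "_" marking a dropped variable), and the prime implicant chart
# plus the greedy selection step then pick the essential prime implicants that get printed.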
| 54
| 0
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = len(snake_case_ )
print("""The following activities are selected:""" )
# The first activity is always selected
_A : Tuple = 0
print(snake_case_,end=""",""" )
# Consider rest of the activities
for j in range(snake_case_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case_,end=""",""" )
_A : Optional[int] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = [1, 3, 0, 5, 8, 5]
_snake_case = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 721
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # because the empty string has exactly one (empty) decomposition
# iterate through the indices
for i in range(snake_case_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
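# Worked example added for clarity (not part of the original script): table[i] collects every
# way to spell the first i characters of the target, so the first call above reports the two
# decompositions of "jwajalapa": ["jwa", "j", "a", "lapa"] and ["j", "w", "a", "j", "a", "lapa"].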
| 54
| 0
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , """embed_dim""" ) )
self.parent.assertTrue(hasattr(_a , """num_heads""" ) )
class lowercase :
def __init__( self , _a , _a=13 , _a=64 , _a=3 , _a=[16, 48, 96] , _a=[1, 3, 6] , _a=[1, 2, 10] , _a=[7, 3, 3] , _a=[4, 2, 2] , _a=[2, 1, 1] , _a=[2, 2, 2] , _a=[False, False, True] , _a=[0.0, 0.0, 0.0] , _a=0.02 , _a=1e-12 , _a=True , _a=True , _a=2 , ) -> Optional[int]:
_A : str = parent
_A : Union[str, Any] = batch_size
_A : int = image_size
_A : Optional[int] = patch_sizes
_A : List[str] = patch_stride
_A : Any = patch_padding
_A : int = is_training
_A : List[str] = use_labels
_A : Tuple = num_labels
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : List[str] = num_heads
_A : Union[str, Any] = stride_kv
_A : Union[str, Any] = depth
_A : Dict = cls_token
_A : Dict = attention_drop_rate
_A : List[Any] = initializer_range
_A : Any = layer_norm_eps
def a__ ( self ) -> Dict:
_A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Union[str, Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
_A : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_A : List[str] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Optional[Any]:
_A : Dict = TFCvtModel(config=_a )
_A : int = model(_a , training=_a )
_A : Union[str, Any] = (self.image_size, self.image_size)
_A : Union[str, Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_A : Optional[int] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_A : Dict = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Any = self.num_labels
_A : str = TFCvtForImageClassification(_a )
_A : List[Any] = model(_a , labels=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self ) -> Tuple:
_A : List[Any] = self.prepare_config_and_inputs()
_A : Tuple = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
_a = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = TFCvtModelTester(self )
_A : str = TFCvtConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> str:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def a__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def a__ ( self ) -> List[Any]:
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def a__ ( self ) -> str:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def a__ ( self ) -> Union[str, Any]:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def a__ ( self ) -> List[str]:
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def a__ ( self ) -> int:
_A : Any = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(_a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def a__ ( self ) -> Union[str, Any]:
_A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = model_class(_a )
_A : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Optional[Any]:
def check_hidden_states_output(_a , _a , _a ):
_A : Union[str, Any] = model_class(_a )
_A : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[int] = outputs.hidden_states
_A : Optional[Any] = len(self.model_tester.depth )
self.assertEqual(len(_a ) , _a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Any = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> Dict:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Any:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = TFCvtModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def a__ ( self ) -> Tuple:
_A : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_A : Optional[Any] = self.default_image_processor
_A : Tuple = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""tf""" )
# forward pass
_A : int = model(**_a )
# verify the logits
_A : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Union[str, Any] = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _a , atol=1e-4 ) )
| 700
|
import operator
def lowerCAmelCase_ ( snake_case_,snake_case_ = False,snake_case_ = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
for i, item in enumerate(snake_case_ ):
if _operator(snake_case_,sublist[-1] ):
sublist.append(snake_case_ )
arr.pop(snake_case_ )
# merging sublist into solution list
if not solution:
solution.extend(snake_case_ )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(snake_case_ ):
if not _operator(snake_case_,snake_case_ ):
solution.insert(snake_case_,snake_case_ )
break
else:
solution.append(snake_case_ )
strand_sort(snake_case_,snake_case_,snake_case_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
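# Note added for clarity (not part of the original script): strand sort repeatedly pulls an
# increasing "strand" out of the remaining input (the first pass over [4, 3, 5, 1, 2] extracts
# [4, 5]) and merges it into the running solution, recursing until nothing is left to sort.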
| 54
| 0
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class lowercase :
def __init__( self , _a = None , _a = [] ) -> str:
_A : List[str] = 0
_A : Tuple = choices
_A : Optional[int] = prompt
if sys.platform == "win32":
_A : List[Any] = """*"""
else:
_A : Dict = """➔ """
def a__ ( self , _a , _a = "" ) -> List[str]:
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _a )
else:
forceWrite(self.choices[index] , _a )
def a__ ( self , _a ) -> Tuple:
if index == self.position:
forceWrite(F''' {self.arrow_char} ''' )
self.write_choice(_a )
else:
forceWrite(F''' {self.choices[index]}''' )
reset_cursor()
def a__ ( self , _a , _a = 1 ) -> str:
_A : Optional[int] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_a )
move_cursor(_a , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["""up"""] )
def a__ ( self ) -> List[Any]:
self.move_direction(Direction.UP )
@input.mark(KEYMAP["""down"""] )
def a__ ( self ) -> Optional[int]:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["""newline"""] )
def a__ ( self ) -> Optional[int]:
move_cursor(len(self.choices ) - self.position , """DOWN""" )
return self.position
@input.mark(KEYMAP["""interrupt"""] )
def a__ ( self ) -> List[Any]:
move_cursor(len(self.choices ) - self.position , """DOWN""" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(_a )] for number in range(10 )] )
def a__ ( self ) -> List[Any]:
_A : Dict = int(chr(self.current_selection ) )
_A : Dict = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _a )
else:
return
else:
return
def a__ ( self , _a = 0 ) -> Optional[Any]:
if self.prompt:
linebreak()
forceWrite(self.prompt , """\n""" )
if in_colab:
forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
else:
forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
_A : int = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_a )
forceWrite("""\n""" )
move_cursor(len(self.choices ) - self.position , """UP""" )
with cursor.hide():
while True:
if in_colab:
try:
_A : Dict = int(builtins.input() )
except ValueError:
_A : List[Any] = default_choice
else:
_A : Union[str, Any] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , """UP""" )
clear_line()
self.write_choice(_a , """\n""" )
return choice
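# Note added for clarity (not part of the original file): this class implements an interactive
# bullet menu -- the arrow and number keys (or a typed index when running inside Google Colab)
# move the highlighted entry, the prompt loop at the end returns the selected index once enter
# is pressed, and the interrupt binding raises KeyboardInterrupt.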
| 701
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda _a : x["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
# The target list contains duplicates, so we can't output more
# results than the number of unique targets
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
| 54
| 0
|
def lowerCAmelCase_ ( snake_case_ ):
return str(snake_case_ ) == str(snake_case_ )[::-1]
def lowerCAmelCase_ ( snake_case_ ):
return int(snake_case_ ) + int(str(snake_case_ )[::-1] )
def lowerCAmelCase_ ( snake_case_ = 10000 ):
_A : Dict = []
for num in range(1,snake_case_ ):
_A : Dict = 0
_A : str = num
while iterations < 50:
_A : Any = sum_reverse(snake_case_ )
iterations += 1
if is_palindrome(snake_case_ ):
break
else:
lychrel_nums.append(snake_case_ )
return len(snake_case_ )
if __name__ == "__main__":
print(f"""{solution() = }""")
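# Note added for clarity (not part of the original script): this is Project Euler 55 -- a
# number counts as Lychrel here when fifty reverse-and-add iterations never reach a
# palindrome, and for the default limit of 10000 the printed count is 249.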
| 702
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark)
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 0
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[Any] = [0 for i in range(r + 1 )]
# nc0 = 1
_A : Optional[Any] = 1
for i in range(1,n + 1 ):
# to compute current row from previous row.
_A : int = min(snake_case_,snake_case_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
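# Note added for clarity (not part of the original script): the single-row DP above applies
# Pascal's rule in place (c[j] += c[j - 1], iterating j downwards so one row is reused),
# so the call computes the binomial coefficient C(10, 5) = 252.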
| 703
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda snake_case_ : i.created_at,reverse=snake_case_ )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
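# A minimal, network-free sketch (hypothetical helper, not part of the original script) of the
# staleness rule applied above: skip exempt labels and issues younger than 30 days, close an
# issue the bot already pinged once it has been idle for more than 7 days, and otherwise mark
# it stale after more than 23 idle days.
def classify_issue(idle_days, age_days, labels, last_comment_by_bot, exempt_labels):
    if age_days < 30 or any(label.lower() in exempt_labels for label in labels):
        return "keep"
    if last_comment_by_bot and idle_days > 7:
        return "close"
    if idle_days > 23:
        return "mark_stale"
    return "keep"

assert classify_issue(10, 60, ["bug"], True, {"wip"}) == "close"
assert classify_issue(25, 60, ["bug"], False, {"wip"}) == "mark_stale"
assert classify_issue(40, 60, ["wip"], True, {"wip"}) == "keep"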
| 54
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_snake_case = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_snake_case = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_snake_case = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def a__ ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def a__ ( self , _a , _a , _a = 1 , _a = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_a , hypotheses=_a , min_len=_a , max_len=_a )
}
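# A small standalone sketch (assuming `nltk` is installed) that calls the same `corpus_gleu`
# routine the metric above wraps; GLEU is the minimum of n-gram precision and recall, so a
# reordered paraphrase like the one below still gets partial credit.
from nltk.translate import gleu_score

hypothesis = ["he", "read", "the", "book", "because", "he", "was", "interested", "in", "world", "history"]
reference = ["he", "was", "interested", "in", "world", "history", "because", "he", "read", "the", "book"]
score = gleu_score.corpus_gleu(list_of_references=[[reference]], hypotheses=[hypothesis], min_len=1, max_len=4)
print(round(score, 2))  # between 0 and 1; the docstring's 0.44 is for its full two-sentence corpus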
| 704
|
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_a , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_a , _a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 0
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> int:
super().__init__(_a )
_A : Optional[Any] = RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> Any:
super().__init__(_a )
_A : List[str] = config.num_labels
_A : Optional[Any] = config.num_hidden_layers
_A : List[Any] = DeeRobertaModel(_a )
_A : str = nn.Dropout(config.hidden_dropout_prob )
_A : Optional[int] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_a )
def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> List[str]:
_A : Union[str, Any] = self.num_layers
try:
_A : Dict = self.roberta(
_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )
_A : Union[str, Any] = outputs[1]
_A : List[Any] = self.dropout(_a )
_A : Any = self.classifier(_a )
_A : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_A : List[Any] = e.message
_A : Any = e.exit_layer
_A : List[str] = outputs[0]
if not self.training:
_A : Optional[int] = entropy(_a )
_A : List[Any] = []
_A : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_A : Tuple = MSELoss()
_A : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[str] = CrossEntropyLoss()
_A : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_A : Optional[Any] = []
for highway_exit in outputs[-1]:
_A : str = highway_exit[0]
if not self.training:
highway_logits_all.append(_a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_A : str = MSELoss()
_A : List[str] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_A : int = CrossEntropyLoss()
_A : Dict = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_a )
if train_highway:
_A : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_A : Optional[int] = (loss,) + outputs
if not self.training:
_A : int = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_A : List[str] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
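# A minimal plain-PyTorch sketch (hypothetical names, not the module above) of the
# entropy-based early-exit idea: each "highway" classifier produces logits, and at inference
# time the model can stop at the first layer whose prediction entropy drops below a threshold.
import torch

def logits_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

def pick_exit_layer(per_layer_logits: list, threshold: float) -> int:
    for layer_idx, logits in enumerate(per_layer_logits):
        if logits_entropy(logits).mean() < threshold:
            return layer_idx  # confident enough to exit early
    return len(per_layer_logits) - 1  # otherwise fall through to the final layer

layer_logits = [torch.zeros(1, 3), torch.tensor([[8.0, -4.0, -4.0]])]
print(pick_exit_layer(layer_logits, threshold=0.1))  # prints 1: only the second layer is confident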
| 705
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
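# A minimal usage sketch (outside the test classes above; assumes `transformers` and `numpy`
# are installed) of the behaviour those tests assert: the default WhisperFeatureExtractor maps
# raw 16 kHz audio to a fixed (80 mel bins x 3000 frames) log-mel spectrogram.
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
raw_audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
input_features = feature_extractor(raw_audio, sampling_rate=16000, return_tensors="np").input_features
print(input_features.shape)  # expected (1, 80, 3000) after padding to 30 seconds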
| 54
| 0
|
def lowerCAmelCase_ ( ):
for n in range(1,1000000 ):
yield n * (n + 1) // 2
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = 1
_A : str = 2
while i * i <= n:
_A : Dict = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def lowerCAmelCase_ ( ):
return next(i for i in triangle_number_generator() if count_divisors(snake_case_ ) > 500 )
if __name__ == "__main__":
print(solution())
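# A minimal standalone sketch (hypothetical name used only here) of the divisor-count trick
# used above: factorise n and multiply (multiplicity + 1) over its prime factors.
def count_divisors(n: int) -> int:
    count, i = 1, 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        count *= multiplicity + 1
        i += 1
    if n > 1:
        count *= 2  # one leftover prime factor
    return count

assert count_divisors(28) == 6            # 1, 2, 4, 7, 14, 28
assert count_divisors(76_576_500) == 576  # the first triangle number with more than 500 divisors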
| 706
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( snake_case_ ):
return data[1:] + data[0]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = int("""0b""" + data[0] + data[-1],2 )
_A : Any = int("""0b""" + data[1:3],2 )
return bin(s[row][col] )[2:]
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[str] = message[:4]
_A : List[Any] = message[4:]
_A : Union[str, Any] = apply_table(snake_case_,snake_case_ )
_A : List[Any] = xor(snake_case_,snake_case_ )
_A : Optional[Any] = apply_sbox(snake_case_,temp[:4] ) # noqa: E741
_A : List[Any] = apply_sbox(snake_case_,temp[4:] )
_A : int = """0""" * (2 - len(snake_case_ )) + l # noqa: E741
_A : Union[str, Any] = """0""" * (2 - len(snake_case_ )) + r
_A : List[Any] = apply_table(l + r,snake_case_ )
_A : Any = xor(snake_case_,snake_case_ )
return temp + right
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "xmod"
def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> Optional[Any]:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : List[Any] = vocab_size
_A : Union[str, Any] = hidden_size
_A : Optional[Any] = num_hidden_layers
_A : List[str] = num_attention_heads
_A : str = hidden_act
_A : Any = intermediate_size
_A : Any = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : Any = max_position_embeddings
_A : int = type_vocab_size
_A : Tuple = initializer_range
_A : Dict = layer_norm_eps
_A : List[Any] = position_embedding_type
_A : Union[str, Any] = use_cache
_A : List[Any] = classifier_dropout
_A : Union[str, Any] = pre_norm
_A : Union[str, Any] = adapter_reduction_factor
_A : int = adapter_layer_norm
_A : List[Any] = adapter_reuse_layer_norm
_A : str = ln_before_adapter
_A : Optional[Any] = list(_a )
_A : Optional[int] = default_language
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 707
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
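# A minimal usage sketch (outside the test classes above; assumes `transformers` and `Pillow`
# are installed) of the resize behaviour those tests assert: every input image is resized to
# the configured height/width and returned as a (batch, channels, height, width) tensor.
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

image_processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected torch.Size([1, 3, 18, 18])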
| 54
| 0
|
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def lowerCAmelCase_ ( snake_case_ ):
_A : str = []
for line in lines:
_A : Dict = re.sub(r"""#.*""","""""",snake_case_ ) # remove comments
if line:
filtered_lines.append(snake_case_ )
_A : Dict = """\n""".join(snake_case_ )
# Make a hash from all this code
_A : Dict = full_str.encode("""utf-8""" )
return shaaaa(snake_case_ ).hexdigest()
# get importable module names and hash for caching
_snake_case = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_snake_case = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_snake_case = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_snake_case = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 708
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a=None , _a=None , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A : Optional[Any] = self.model.config
else:
_A : int = config
_A : Optional[Any] = data_args
_A : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : Union[str, Any] = label_smoothed_nll_loss
def a__ ( self , _a ) -> int:
if self.optimizer is None:
_A : List[str] = ["""bias""", """LayerNorm.weight"""]
_A : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_A : Optional[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_A : Dict = Adafactor
_A : int = {"""scale_parameter""": False, """relative_step""": False}
else:
_A : int = AdamW
_A : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_A : List[str] = self.args.learning_rate
if self.sharded_ddp:
_A : List[str] = OSS(
params=_a , optim=_a , **_a , )
else:
_A : Tuple = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_A : Union[str, Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a__ ( self , _a ) -> Dict:
_A : List[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
        # If PAD token is not defined, at least the EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
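# A minimal plain-PyTorch sketch (hypothetical helper; an assumption about the examples'
# `utils` module, not its verbatim code) of the `label_smoothed_nll_loss` the trainer above
# imports: blend the NLL of the gold token with the mean NLL over the vocabulary, zeroing out
# padded positions before summing.
import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index):
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss

lprobs = torch.log_softmax(torch.randn(2, 5), dim=-1)
loss, nll = label_smoothed_nll_loss(lprobs, torch.tensor([1, 0]), epsilon=0.1, ignore_index=0)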
| 54
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_snake_case = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
_snake_case = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode("utf-8").split()
_snake_case = "|".join(sys.argv[1:])
_snake_case = re.compile(rf"""^({joined_dirs}).*?\.py$""")
_snake_case = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 709
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(snake_case_,snake_case_ ) -> bool:
_A : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A : Optional[int] = mean(
int(is_in_circle(uniform(-1.0,1.0 ),uniform(-1.0,1.0 ) ) )
for _ in range(snake_case_ ) )
# The ratio of the area for circle to square is pi/4.
_A : List[str] = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0.0,snake_case_ = 1.0,):
return mean(
function_to_integrate(uniform(snake_case_,snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = 1.0 ):
def identity_function(snake_case_ ) -> float:
return x
_A : Any = area_under_curve_estimator(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def lowerCAmelCase_ ( snake_case_ ):
def function_to_integrate(snake_case_ ) -> float:
return sqrt(4.0 - x * x )
_A : Optional[int] = area_under_curve_estimator(
snake_case_,snake_case_,0.0,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
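# A minimal standalone sketch (hypothetical name, separate from the functions above) of the
# mean-value Monte Carlo rule they implement: the average of f over uniform samples on
# [a, b], scaled by (b - a), approximates the integral of f over [a, b].
from random import uniform
from statistics import mean

def monte_carlo_integral(f, a, b, iterations=100_000):
    return mean(f(uniform(a, b)) for _ in range(iterations)) * (b - a)

print(monte_carlo_integral(lambda x: x, 0.0, 1.0))                    # expected ~0.5
print(monte_carlo_integral(lambda x: 4.0 / (1.0 + x * x), 0.0, 1.0))  # expected ~pi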
| 54
| 0
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowercase ( UpperCamelCase__ ):
_a = DistilBertTokenizer
_a = DistilBertTokenizerFast
_a = True
@slow
def a__ ( self ) -> Optional[Any]:
_A : int = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
_A : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=_a )
_A : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_a )
_A : Any = tokenizer.build_inputs_with_special_tokens(_a )
_A : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 710
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
| 54
| 0
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowercase ( unittest.TestCase ):
@require_torch
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
_A : str = load_dataset("""ashraq/esc50""" )
_A : Optional[Any] = dataset["""train"""]["""audio"""][-1]["""array"""]
_A : List[Any] = audio_classifier(_a , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_a ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def a__ ( self ) -> int:
pass
@slow
@require_torch
def a__ ( self ) -> int:
_A : Any = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
_A : Tuple = load_dataset("""ashraq/esc50""" )
_A : Union[str, Any] = dataset["""train"""]["""audio"""][-1]["""array"""]
_A : str = audio_classifier(_a , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_a ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
_A : Optional[int] = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
_A : str = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def a__ ( self ) -> List[str]:
pass
| 711
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
self.assertTrue(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_snake_case = datasets.logging.get_logger(__name__)
_snake_case = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_snake_case = "\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_snake_case = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
_snake_case = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def a__ ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
def a__ ( self , _a ) -> Union[str, Any]:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
_A : List[Any] = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
_A : Optional[int] = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
_A : List[Any] = self.config_name.upper()
else:
raise KeyError(
F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
_A : str = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
_A : Any = score.BleurtScorer(os.path.join(_a , _a ) )
def a__ ( self , _a , _a ) -> Tuple:
_A : Union[str, Any] = self.scorer.score(references=_a , candidates=_a )
return {"scores": scores}
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
import csv
import tweepy
# Twitter API credentials
_snake_case = ""
_snake_case = ""
_snake_case = ""
_snake_case = ""
def lowerCAmelCase_ ( snake_case_ ):
'''simple docstring'''
_A : str = tweepy.OAuthHandler(snake_case_,snake_case_ )
auth.set_access_token(snake_case_,snake_case_ )
_A : int = tweepy.API(snake_case_ )
# initialize a list to hold all the tweepy Tweets
_A : Union[str, Any] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_A : List[str] = api.user_timeline(screen_name=snake_case_,count=200 )
# save most recent tweets
alltweets.extend(snake_case_ )
# save the id of the oldest tweet less one
_A : List[str] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(snake_case_ ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_A : int = api.user_timeline(
screen_name=snake_case_,count=200,max_id=snake_case_ )
# save most recent tweets
alltweets.extend(snake_case_ )
# update the id of the oldest tweet less one
_A : Union[str, Any] = alltweets[-1].id - 1
print(f'''...{len(snake_case_ )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_A : List[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''',"""w""" ) as f:
_A : List[Any] = csv.writer(snake_case_ )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(snake_case_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import defaultdict
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = 1
_A : Optional[int] = True
for v in tree[start]:
if v not in visited:
ret += dfs(snake_case_ )
if ret % 2 == 0:
cuts.append(snake_case_ )
return ret
def lowerCAmelCase_ ( ):
dfs(1 )
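# A minimal, self-contained sketch (separate from the globals above) of the same
# even-tree idea: an edge can be cut whenever the subtree below it has an even
# number of vertices. The adjacency list and the expected answer (2) come from
# the sample tree built in the __main__ block below.
def _even_tree_demo() -> int:
    adjacency = {1: [2, 3, 6], 2: [1, 5, 7], 3: [1, 4], 4: [3], 5: [2], 6: [1, 8], 7: [2], 8: [6, 9, 10], 9: [8], 10: [8]}
    removable = 0
    def subtree_size(node: int, parent: int) -> int:
        nonlocal removable
        size = 1
        for child in adjacency[node]:
            if child != parent:
                size += subtree_size(child, node)
        if parent != 0 and size % 2 == 0:
            removable += 1
        return size
    subtree_size(1, 0)
    return removable  # 2 removable edges for this tree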
if __name__ == "__main__":
_snake_case , _snake_case = 10, 9
_snake_case = defaultdict(list)
_snake_case = {}
_snake_case = []
_snake_case = 0
_snake_case = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase_ ( snake_case_ ):
return np.dot(snake_case_,snake_case_ )
class lowercase :
def __init__( self , *,
_a = np.inf , _a = "linear" , _a = 0.0 , ) -> None:
_A : Tuple = regularization
_A : str = gamma
if kernel == "linear":
_A : str = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("""rbf kernel requires gamma""" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("""gamma must be float or int""" )
if not self.gamma > 0:
raise ValueError("""gamma must be > 0""" )
_A : List[str] = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
_A : Optional[Any] = F'''Unknown kernel: {kernel}'''
raise ValueError(_a )
def a__ ( self , _a , _a ) -> float:
return np.dot(_a , _a )
def a__ ( self , _a , _a ) -> float:
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def a__ ( self , _a , _a ) -> None:
_A : int = observations
_A : int = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
(_A ) : Optional[int] = np.shape(_a )
def to_minimize(_a ) -> float:
_A : Union[str, Any] = 0
(_A ) : Optional[Any] = np.shape(_a )
for i in range(_a ):
for j in range(_a ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_a )
_A : Any = LinearConstraint(_a , 0 , 0 )
_A : Optional[Any] = Bounds(0 , self.regularization )
_A : Union[str, Any] = minimize(
_a , np.ones(_a ) , bounds=_a , constraints=[ly_contraint] ).x
_A : Dict = l_star
# calculating mean offset of separation plane to points
_A : Tuple = 0
for i in range(_a ):
for j in range(_a ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
_A : List[str] = s / n
def a__ ( self , _a ) -> int:
_A : Union[str, Any] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , _a )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
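# A minimal sketch (separate from the class above) of what the RBF kernel
# computes for two observations; the vectors and gamma below are made-up
# example values, not anything used elsewhere in this file.
def _rbf_kernel_demo() -> float:
    vector_a = np.array([1.0, 2.0])
    vector_b = np.array([2.0, 0.0])
    gamma = 0.5
    # exp(-gamma * ||a - b||^2); here ||a - b||^2 = 1 + 4 = 5, so the result is exp(-2.5)
    return float(np.exp(-gamma * np.dot(vector_a - vector_b, vector_a - vector_b)))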
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
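    # Derivation sketch: with x = a + d, y = a, z = a - d, the quantity
    # x^2 - y^2 - z^2 simplifies to a * (4d - a) = n, hence 4d = a + n / a.
    # The value computed inside the loop below is therefore 4d first; it is
    # kept only when it is a multiple of 4, and positivity of z and of n
    # forces d < a < 4d.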
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
            if common_difference % 4:  # this value is 4d, which must be divisible by 4 for d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 gives a > d, and n > 0 gives a < 4d
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
class lowercase : # Public class to implement a graph
def __init__( self , _a , _a , _a ) -> None:
_A : Optional[int] = row
_A : Optional[Any] = col
_A : Any = graph
def a__ ( self , _a , _a , _a ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def a__ ( self , _a , _a , _a ) -> None:
# Checking all 8 elements surrounding nth element
_A : Dict = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_A : int = [-1, 0, 1, -1, 1, -1, 0, 1]
_A : Optional[int] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , _a )
def a__ ( self ) -> int: # And finally, count all islands.
_A : Union[str, Any] = [[False for j in range(self.COL )] for i in range(self.ROW )]
_A : Dict = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(_a , _a , _a )
count += 1
return count
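# A minimal, self-contained sketch (separate from the class above) of the same
# 8-neighbour flood fill, written iteratively for a hard-coded grid; the grid
# and the expected count (2) are example values chosen for illustration.
def _count_islands_demo() -> int:
    grid = [[1, 0, 0], [0, 0, 1], [0, 0, 1]]
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]
    islands = 0
    for row in range(rows):
        for col in range(cols):
            if grid[row][col] and not seen[row][col]:
                islands += 1
                seen[row][col] = True
                stack = [(row, col)]
                while stack:
                    i, j = stack.pop()
                    for di in (-1, 0, 1):
                        for dj in (-1, 0, 1):
                            ni, nj = i + di, j + dj
                            if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not seen[ni][nj]:
                                seen[ni][nj] = True
                                stack.append((ni, nj))
    return islands  # 2 islands in this grid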
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
from __future__ import annotations
import os
from collections.abc import Mapping
_snake_case = tuple[int, int]
class lowercase :
def __init__( self , _a , _a ) -> None:
_A : set[int] = vertices
_A : dict[EdgeT, int] = {
(min(_a ), max(_a )): weight for edge, weight in edges.items()
}
def a__ ( self , _a , _a ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_A : Union[str, Any] = weight
def a__ ( self ) -> Graph:
_A : Graph = Graph({min(self.vertices )} , {} )
_A : EdgeT
_A : int
_A : EdgeT
_A : int
while len(subgraph.vertices ) < len(self.vertices ):
_A : Dict = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_A : List[str] = edge
_A : Any = weight
subgraph.add_edge(_a , _a )
return subgraph
def lowerCAmelCase_ ( snake_case_ = "p107_network.txt" ):
_A : str = os.path.abspath(os.path.dirname(snake_case_ ) )
_A : str = os.path.join(snake_case_,snake_case_ )
_A : dict[EdgeT, int] = {}
_A : list[str]
_A : int
_A : int
with open(snake_case_ ) as f:
_A : Union[str, Any] = f.read().strip().split("""\n""" )
_A : Tuple = [line.split(""",""" ) for line in data]
for edgea in range(1,len(snake_case_ ) ):
for edgea in range(snake_case_ ):
if adjaceny_matrix[edgea][edgea] != "-":
_A : Optional[Any] = int(adjaceny_matrix[edgea][edgea] )
_A : Graph = Graph(set(range(len(snake_case_ ) ) ),snake_case_ )
_A : Graph = graph.prims_algorithm()
_A : int = sum(graph.edges.values() )
_A : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
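# A minimal sketch (separate from the Graph class above) of Prim's algorithm on
# a made-up three-vertex triangle, illustrating the "total of all edge weights
# minus total of the minimum spanning tree" saving that the function above
# computes from the real network file.
def _prims_demo() -> int:
    weights = {(0, 1): 1, (1, 2): 2, (0, 2): 10}
    in_tree = {0}
    mst_total = 0
    while in_tree != {0, 1, 2}:
        edge, weight = min(
            ((e, w) for e, w in weights.items() if (e[0] in in_tree) ^ (e[1] in in_tree)),
            key=lambda item: item[1],
        )
        in_tree.update(edge)
        mst_total += weight
    return sum(weights.values()) - mst_total  # 13 - 3 = 10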
if __name__ == "__main__":
print(f"""{solution() = }""")
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = MgpstrTokenizer
_a = False
_a = {}
_a = False
def a__ ( self ) -> str:
super().setUp()
# fmt: off
_A : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_A : List[str] = dict(zip(_a , range(len(_a ) ) ) )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
def a__ ( self , **_a ) -> Optional[Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
_A : str = """tester"""
_A : Union[str, Any] = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def a__ ( self ) -> Optional[Any]:
pass
def a__ ( self ) -> Tuple:
_A : Optional[Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A : int = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_A : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_A : Optional[int] = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def a__ ( self ) -> List[Any]:
_A : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A : Any = self.get_input_output_texts(_a )
_A : str = tokenizer.tokenize(_a )
_A : List[Any] = tokenizer.convert_tokens_to_ids(_a )
_A : Union[str, Any] = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_A : List[Any] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_A : Optional[Any] = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(""" """ , """""" ) , _a )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def a__ ( self ) -> Any:
pass
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(snake_case_,snake_case_ ) -> bool:
_A : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A : Optional[int] = mean(
int(is_in_circle(uniform(-1.0,1.0 ),uniform(-1.0,1.0 ) ) )
for _ in range(snake_case_ ) )
# The ratio of the area for circle to square is pi/4.
_A : List[str] = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
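# A minimal sketch (separate from the function above) of the same dart-throwing
# idea written as an explicit loop; the sample count is an arbitrary example value.
def _pi_estimate_demo(samples: int = 10_000) -> float:
    inside = 0
    for _ in range(samples):
        x, y = uniform(-1.0, 1.0), uniform(-1.0, 1.0)
        if x * x + y * y <= 1.0:
            inside += 1
    # area(circle) / area(square) = pi / 4, so scale the hit ratio back up by 4
    return 4 * inside / samples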
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0.0,snake_case_ = 1.0,):
return mean(
function_to_integrate(uniform(snake_case_,snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = 1.0 ):
def identity_function(snake_case_ ) -> float:
return x
_A : Any = area_under_curve_estimator(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def lowerCAmelCase_ ( snake_case_ ):
def function_to_integrate(snake_case_ ) -> float:
return sqrt(4.0 - x * x )
_A : Optional[int] = area_under_curve_estimator(
snake_case_,snake_case_,0.0,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model"}
_snake_case = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
_snake_case = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
_snake_case = "▁"
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "attention_mask"]
def __init__( self , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a = None , **_a , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
_A : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_A : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_A : int = vocab_file
_A : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_A : Union[str, Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
_A : Any = len(self.sp_model ) - 1
_A : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def a__ ( self , _a , _a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A : Optional[int] = [self.cls_token_id]
_A : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def a__ ( self , _a , _a = None ) -> List[int]:
_A : int = [self.sep_token_id]
_A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ ( self ) -> Tuple:
return len(self.sp_model )
def a__ ( self ) -> Any:
_A : Union[str, Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ ( self , _a ) -> List[str]:
return self.sp_model.encode(_a , out_type=_a )
def a__ ( self , _a ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_A : Optional[int] = self.sp_model.PieceToId(_a )
return spm_id if spm_id else self.unk_token_id
def a__ ( self , _a ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_a )
def a__ ( self , _a ) -> Optional[int]:
_A : Union[str, Any] = []
_A : str = """"""
_A : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
_A : Tuple = True
_A : Dict = []
else:
current_sub_tokens.append(_a )
_A : Optional[Any] = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __getstate__( self ) -> Dict:
_A : List[Any] = self.__dict__.copy()
_A : Union[str, Any] = None
return state
def __setstate__( self , _a ) -> int:
_A : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_A : Tuple = {}
_A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : Optional[Any] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
_A : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
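# Illustrative sketch (not part of the sample above): the two sequence-building methods in
# this tokenizer produce the BARThez/CamemBERT-style layouts <s> A </s> for a single sequence
# and <s> A </s></s> B </s> for a pair. The standalone helper below mirrors that logic using
# the fairseq ids hard-coded above (<s>=0, </s>=2); it is a reading aid, not a replacement
# for the tokenizer itself.
def build_inputs_with_special_tokens_sketch(ids_a, ids_b=None, cls_id=0, sep_id=2):
    # single sequence: <s> A </s>
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    # pair of sequences: <s> A </s></s> B </s>
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]

assert build_inputs_with_special_tokens_sketch([5, 6]) == [0, 5, 6, 2]
assert build_inputs_with_special_tokens_sketch([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]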
| 720
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = list(snake_case_ )
_A : List[Any] = list(snake_case_ )
_A : Tuple = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
while True:
_A : int = ["""$"""] * len(snake_case_ )
_A : Any = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1,len(snake_case_ ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
_A : Dict = list(set(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = []
for minterm in minterms:
_A : Tuple = """"""
for _ in range(snake_case_ ):
_A : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = list(snake_case_ )
_A : Tuple = list(snake_case_ )
_A : Dict = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = []
_A : str = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
for i in range(len(snake_case_ ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
_A : Optional[int] = 0
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i],binary[j],snake_case_ ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
float(snake_case_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_A : int = decimal_to_binary(snake_case_,snake_case_ )
_A : Optional[Any] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
_A : int = prime_implicant_chart(snake_case_,snake_case_ )
_A : int = selection(snake_case_,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
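# Illustrative sketch (separate from the masked sample above) of the merge rule that
# compare_string implements in the Quine-McCluskey method: two implicants combine only if
# they differ in exactly one bit, and the differing bit is replaced by "_".
def merge_implicants_sketch(a, b):
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return None  # zero or more than one differing bit: no merge
    i = diff[0]
    return a[:i] + "_" + a[i + 1 :]

assert merge_implicants_sketch("0110", "0100") == "01_0"
assert merge_implicants_sketch("0110", "1001") is None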
| 54
| 0
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a__ ( self , _a=0 ) -> str:
_A : Union[str, Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(_a ) )
_A : Any = np.random.RandomState(_a )
_A : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self ) -> Optional[Any]:
_A : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_a )
_A : int = self.get_dummy_inputs()
_A : Union[str, Any] = pipe(**_a ).images
_A : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
_A : int = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def a__ ( self ) -> List[str]:
_A : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_A : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_a )
pipe.set_progress_bar_config(disable=_a )
_A : Dict = self.get_dummy_inputs()
_A : int = pipe(**_a ).images
_A : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_A : List[str] = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def a__ ( self ) -> Any:
_A : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_A : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
# warmup pass to apply optimizations
_A : Union[str, Any] = pipe(**self.get_dummy_inputs() )
_A : int = self.get_dummy_inputs()
_A : List[str] = pipe(**_a ).images
_A : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_A : List[str] = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def a__ ( self ) -> Optional[Any]:
_A : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_A : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_A : Dict = self.get_dummy_inputs()
_A : str = pipe(**_a ).images
_A : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_A : Dict = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def a__ ( self ) -> Union[str, Any]:
_A : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_A : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = self.get_dummy_inputs()
_A : str = pipe(**_a ).images
_A : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_A : Union[str, Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def a__ ( self ) -> List[Any]:
_A : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_A : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_A : Any = self.get_dummy_inputs()
_A : Any = pipe(**_a ).images
_A : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_A : int = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> Any:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ ( self ) -> List[str]:
_A : str = ort.SessionOptions()
_A : Any = False
return options
def a__ ( self ) -> List[str]:
_A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_A : Optional[int] = init_image.resize((768, 512) )
# using the PNDM scheduler by default
_A : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = """A fantasy landscape, trending on artstation"""
_A : List[Any] = np.random.RandomState(0 )
_A : Dict = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="""np""" , )
_A : str = output.images
_A : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_A : Optional[Any] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def a__ ( self ) -> Dict:
_A : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_A : List[str] = init_image.resize((768, 512) )
_A : List[str] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
_A : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_A : Any = """A fantasy landscape, trending on artstation"""
_A : Optional[Any] = np.random.RandomState(0 )
_A : Union[str, Any] = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_a , output_type="""np""" , )
_A : Optional[Any] = output.images
_A : Any = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_A : Tuple = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 721
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # the empty string has exactly one construction: the empty combination
# iterate through the indices
for i in range(snake_case_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
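# Illustrative sketch (independent of the masked sample above) of the same tabulation:
# table[i] holds every way to build target[:i]; a word that matches at position i extends
# each of those ways and is pushed forward to table[i + len(word)].
def all_construct_sketch(target, word_bank):
    table = [[] for _ in range(len(target) + 1)]
    table[0] = [[]]  # one way to build the empty prefix: use no words
    for i in range(len(target)):
        if not table[i]:
            continue
        for word in word_bank:
            if target[i : i + len(word)] == word:
                table[i + len(word)] += [way + [word] for way in table[i]]
    return table[len(target)]

assert all_construct_sketch("ab", ["a", "b", "ab"]) == [["ab"], ["a", "b"]]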
| 54
| 0
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
_A : List[Any] = 128
elif "12-12" in model_name:
_A : str = 12
_A : Tuple = 12
elif "14-14" in model_name:
_A : Any = 14
_A : Tuple = 14
elif "16-16" in model_name:
_A : Any = 16
_A : Optional[Any] = 16
else:
raise ValueError("""Model not supported""" )
_A : Tuple = """huggingface/label-files"""
if "speech-commands" in model_name:
_A : Optional[Any] = 35
_A : Optional[Any] = """speech-commands-v2-id2label.json"""
else:
_A : Optional[int] = 527
_A : int = """audioset-id2label.json"""
_A : str = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
_A : List[str] = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : Optional[int] = idalabel
_A : Tuple = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( snake_case_ ):
if "module.v" in name:
_A : Union[str, Any] = name.replace("""module.v""","""audio_spectrogram_transformer""" )
if "cls_token" in name:
_A : Dict = name.replace("""cls_token""","""embeddings.cls_token""" )
if "dist_token" in name:
_A : List[str] = name.replace("""dist_token""","""embeddings.distillation_token""" )
if "pos_embed" in name:
_A : List[str] = name.replace("""pos_embed""","""embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_A : Tuple = name.replace("""patch_embed.proj""","""embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
_A : Tuple = name.replace("""blocks""","""encoder.layer""" )
if "attn.proj" in name:
_A : List[Any] = name.replace("""attn.proj""","""attention.output.dense""" )
if "attn" in name:
_A : Union[str, Any] = name.replace("""attn""","""attention.self""" )
if "norm1" in name:
_A : List[str] = name.replace("""norm1""","""layernorm_before""" )
if "norm2" in name:
_A : Optional[Any] = name.replace("""norm2""","""layernorm_after""" )
if "mlp.fc1" in name:
_A : str = name.replace("""mlp.fc1""","""intermediate.dense""" )
if "mlp.fc2" in name:
_A : Dict = name.replace("""mlp.fc2""","""output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_A : Tuple = name.replace("""audio_spectrogram_transformer.norm""","""audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
_A : Optional[int] = name.replace("""module.mlp_head.0""","""classifier.layernorm""" )
if "module.mlp_head.1" in name:
_A : str = name.replace("""module.mlp_head.1""","""classifier.dense""" )
return name
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
for key in orig_state_dict.copy().keys():
_A : int = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
_A : Any = key.split(""".""" )
_A : List[Any] = int(key_split[3] )
_A : str = config.hidden_size
if "weight" in key:
_A : int = val[:dim, :]
_A : List[str] = val[dim : dim * 2, :]
_A : Union[str, Any] = val[-dim:, :]
else:
_A : Optional[Any] = val[:dim]
_A : int = val[dim : dim * 2]
_A : Any = val[-dim:]
else:
_A : str = val
return orig_state_dict
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(snake_case_,snake_case_ )
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=False ):
_A : Any = get_audio_spectrogram_transformer_config(snake_case_ )
_A : str = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
_A : Optional[Any] = model_name_to_url[model_name]
_A : Dict = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""" )
# remove some keys
remove_keys(snake_case_ )
# rename some keys
_A : Tuple = convert_state_dict(snake_case_,snake_case_ )
# load 🤗 model
_A : str = ASTForAudioClassification(snake_case_ )
model.eval()
model.load_state_dict(snake_case_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
_A : Any = -4.2_67_73_93 if """speech-commands""" not in model_name else -6.84_59_78
_A : Tuple = 4.5_68_99_74 if """speech-commands""" not in model_name else 5.5_65_45_26
_A : Any = 1024 if """speech-commands""" not in model_name else 128
_A : Union[str, Any] = ASTFeatureExtractor(mean=snake_case_,std=snake_case_,max_length=snake_case_ )
if "speech-commands" in model_name:
_A : int = load_dataset("""speech_commands""","""v0.02""",split="""validation""" )
_A : Optional[int] = dataset[0]["""audio"""]["""array"""]
else:
_A : List[str] = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""",filename="""sample_audio.flac""",repo_type="""dataset""",)
_A : List[Any] = torchaudio.load(snake_case_ )
_A : Union[str, Any] = waveform.squeeze().numpy()
_A : List[str] = feature_extractor(snake_case_,sampling_rate=16000,return_tensors="""pt""" )
# forward pass
_A : str = model(**snake_case_ )
_A : Optional[Any] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_A : Optional[int] = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_A : Any = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_A : Any = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_A : Any = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_A : str = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_A : int = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_A : Union[str, Any] = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
_A : int = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3],snake_case_,atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(snake_case_ )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
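# Example invocation of the conversion entry point above (a sketch: the script filename is an
# assumption, the flags are the ones registered by the argument parser above, and --model_name
# must be one of the keys of the checkpoint-URL mapping in the conversion function):
#
#   python convert_audio_spectrogram_transformer.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub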
| 700
|
import operator
def lowerCAmelCase_ ( snake_case_,snake_case_ = False,snake_case_ = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
for i, item in enumerate(snake_case_ ):
if _operator(snake_case_,sublist[-1] ):
sublist.append(snake_case_ )
arr.pop(snake_case_ )
# merging sublist into solution list
if not solution:
solution.extend(snake_case_ )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(snake_case_ ):
if not _operator(snake_case_,snake_case_ ):
solution.insert(snake_case_,snake_case_ )
break
else:
solution.append(snake_case_ )
strand_sort(snake_case_,snake_case_,snake_case_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
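# Worked trace of the first assertion above, following the code: the first pass pulls the
# increasing strand [4, 5] out of [4, 3, 5, 1, 2] and makes it the initial solution; the
# second pass pulls [3] and merges it to give [3, 4, 5]; the third pass pulls [1, 2] and
# merges it to give the final [1, 2, 3, 4, 5].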
| 54
| 0
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
set_seed(770)
_snake_case = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
_snake_case = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
_snake_case = os.path.dirname(os.path.abspath(__file__))
_snake_case = os.path.join(os.path.expanduser("~"), ".cache")
_snake_case = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def lowerCAmelCase_ ( snake_case_,snake_case_=False ):
_A : Tuple = model_type
if use_small:
key += "_small"
return os.path.join(snake_case_,REMOTE_MODEL_PATHS[key]["""file_name"""] )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
os.makedirs(snake_case_,exist_ok=snake_case_ )
hf_hub_download(repo_id=snake_case_,filename=snake_case_,local_dir=snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=False,snake_case_="text" ):
if model_type == "text":
_A : List[Any] = BarkSemanticModel
_A : List[str] = BarkSemanticConfig
_A : List[str] = BarkSemanticGenerationConfig
elif model_type == "coarse":
_A : Any = BarkCoarseModel
_A : str = BarkCoarseConfig
_A : Dict = BarkCoarseGenerationConfig
elif model_type == "fine":
_A : List[Any] = BarkFineModel
_A : Union[str, Any] = BarkFineConfig
_A : Tuple = BarkFineGenerationConfig
else:
raise NotImplementedError()
_A : int = f'''{model_type}_small''' if use_small else model_type
_A : Tuple = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(snake_case_ ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""],model_info["""file_name"""] )
_A : Union[str, Any] = torch.load(snake_case_,map_location=snake_case_ )
# this is a hack
_A : Dict = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
_A : Optional[Any] = model_args["""vocab_size"""]
_A : List[str] = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_A : Dict = model_args.pop("""n_head""" )
_A : Optional[Any] = model_args.pop("""n_embd""" )
_A : Union[str, Any] = model_args.pop("""n_layer""" )
_A : List[Any] = ConfigClass(**checkpoint["""model_args"""] )
_A : Optional[Any] = ModelClass(config=snake_case_ )
_A : Optional[Any] = GenerationConfigClass()
_A : Dict = model_generation_config
_A : Tuple = checkpoint["""model"""]
# fixup checkpoint
_A : str = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(snake_case_ ):
# replace part of the key with corresponding layer name in HF implementation
_A : Union[str, Any] = k[len(snake_case_ ) :]
for old_layer_name in new_layer_name_dict:
_A : Optional[Any] = new_k.replace(snake_case_,new_layer_name_dict[old_layer_name] )
_A : Tuple = state_dict.pop(snake_case_ )
_A : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
_A : str = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
_A : Any = set(model.state_dict().keys() ) - set(state_dict.keys() )
_A : Any = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(snake_case_ ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(snake_case_ ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(snake_case_,strict=snake_case_ )
_A : int = model.num_parameters(exclude_embeddings=snake_case_ )
_A : Optional[Any] = checkpoint["""best_val_loss"""].item()
logger.info(f'''model loaded: {round(n_params/1e6,1 )}M params, {round(snake_case_,3 )} loss''' )
model.eval()
model.to(snake_case_ )
del checkpoint, state_dict
return model
def lowerCAmelCase_ ( snake_case_,snake_case_=False,snake_case_="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_A : Tuple = """cpu""" # do conversion on cpu
_A : Tuple = _get_ckpt_path(snake_case_,use_small=snake_case_ )
_A : List[str] = _load_model(snake_case_,snake_case_,model_type=snake_case_,use_small=snake_case_ )
# load bark initial model
_A : int = _bark_load_model(snake_case_,"""cpu""",model_type=snake_case_,use_small=snake_case_ )
if model_type == "text":
_A : List[Any] = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=snake_case_ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
_A : List[Any] = 5
_A : List[Any] = 10
if model_type in ["text", "coarse"]:
_A : Any = torch.randint(256,(batch_size, sequence_length),dtype=torch.int )
_A : Optional[Any] = bark_model(snake_case_ )[0]
_A : Union[str, Any] = model(snake_case_ )
# take last logits
_A : Union[str, Any] = output_new_model_total.logits[:, [-1], :]
else:
_A : Tuple = 3
_A : Union[str, Any] = 8
_A : int = torch.randint(256,(batch_size, sequence_length, n_codes_total),dtype=torch.int )
_A : Union[str, Any] = model(snake_case_,snake_case_ )
_A : Dict = bark_model(snake_case_,snake_case_ )
_A : List[str] = output_new_model_total.logits
# any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,):
_A : List[str] = os.path.join(snake_case_,snake_case_ )
_A : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(snake_case_,"""config.json""" ) )
_A : Union[str, Any] = BarkCoarseConfig.from_pretrained(os.path.join(snake_case_,"""config.json""" ) )
_A : str = BarkFineConfig.from_pretrained(os.path.join(snake_case_,"""config.json""" ) )
_A : Dict = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
_A : Optional[Any] = BarkSemanticModel.from_pretrained(snake_case_ )
_A : List[str] = BarkCoarseModel.from_pretrained(snake_case_ )
_A : Optional[Any] = BarkFineModel.from_pretrained(snake_case_ )
_A : Union[str, Any] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
_A : List[Any] = BarkConfig.from_sub_model_configs(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config,coarseAcoustic.generation_config,fineAcoustic.generation_config )
_A : List[str] = BarkModel(snake_case_ )
_A : Tuple = semantic
_A : Optional[int] = coarseAcoustic
_A : List[str] = fineAcoustic
_A : Optional[int] = codec
_A : List[str] = bark_generation_config
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
bark.save_pretrained(snake_case_,repo_id=snake_case_,push_to_hub=snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
_snake_case = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
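# Example invocation of the loader wired above (a sketch: the script filename is an assumption;
# model_type and pytorch_dump_folder_path are positional, --is_small is optional):
#
#   python convert_suno_to_hf.py text ./bark-text-converted --is_small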
| 701
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# Inputs without a mask token are not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid targets
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda _a : x["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
# The target list contains duplicates, so we can't output more
# results than there are unique targets
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
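# Minimal usage sketch of the pipeline exercised above (same tiny checkpoint and call shape
# as the tests; exact scores and tokens of a tiny random model are not meaningful):
from transformers import pipeline

unmasker_example = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
print(unmasker_example("My name is <mask>"))
# -> a list of dicts with "sequence", "score", "token" and "token_str" keys, as asserted above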
| 54
| 0
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = list(snake_case_ )
_A : List[Any] = list(snake_case_ )
_A : Tuple = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
while True:
_A : int = ["""$"""] * len(snake_case_ )
_A : Any = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1,len(snake_case_ ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
_A : Dict = list(set(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = []
for minterm in minterms:
_A : Tuple = """"""
for _ in range(snake_case_ ):
_A : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = list(snake_case_ )
_A : Tuple = list(snake_case_ )
_A : Dict = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = []
_A : str = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
for i in range(len(snake_case_ ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
_A : Optional[int] = 0
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i],binary[j],snake_case_ ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
float(snake_case_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_A : int = decimal_to_binary(snake_case_,snake_case_ )
_A : Optional[Any] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
_A : int = prime_implicant_chart(snake_case_,snake_case_ )
_A : int = selection(snake_case_,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 702
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lowercases letters
pass
| 54
| 0
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=1024,snake_case_=1024,snake_case_=False,**snake_case_ ):
_A : Tuple = AutoTokenizer.from_pretrained(snake_case_ )
_A : Tuple = SeqaSeqDataset(snake_case_,snake_case_,snake_case_,snake_case_,type_path="""train""",**snake_case_ )
_A : List[str] = tok.pad_token_id
def get_lens(snake_case_ ):
_A : Union[str, Any] = tqdm(
DataLoader(snake_case_,batch_size=512,num_workers=8,shuffle=snake_case_,collate_fn=ds.collate_fn ),desc=str(ds.len_file ),)
_A : Tuple = []
for batch in dl:
_A : Dict = batch["""input_ids"""].ne(snake_case_ ).sum(1 ).tolist()
_A : Tuple = batch["""labels"""].ne(snake_case_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(snake_case_,snake_case_ ):
max_lens.append(max(snake_case_,snake_case_ ) )
else:
max_lens.extend(snake_case_ )
return max_lens
_A : List[Any] = get_lens(snake_case_ )
_A : Any = SeqaSeqDataset(snake_case_,snake_case_,snake_case_,snake_case_,type_path="""val""",**snake_case_ )
_A : Dict = get_lens(snake_case_ )
pickle_save(snake_case_,train_ds.len_file )
pickle_save(snake_case_,val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
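# Example invocation via python-fire as wired above (a sketch: the script filename is an
# assumption and the argument names are masked in this sample, but from the calls the first
# two positional arguments are a tokenizer name/path and a data directory with train/val files):
#
#   python save_len_file.py facebook/bart-large-cnn ./cnn_dm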
| 703
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda snake_case_ : i.created_at,reverse=snake_case_ )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
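# Note: this script reads a GITHUB_TOKEN environment variable with access to the
# huggingface/transformers repository and is meant to be run periodically (for example
# from a scheduled CI job) so that inactive issues are commented on and eventually closed.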
| 54
| 0
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Model name or path of model to be trained."} )
_a = field(
default="./",metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
_a = field(
default="codeparrot/codeparrot-clean-train",metadata={"help": "Name or path of training dataset."} )
_a = field(
default="codeparrot/codeparrot-clean-valid",metadata={"help": "Name or path of validation dataset."} )
_a = field(default=2,metadata={"help": "Batch size for training."} )
_a = field(default=2,metadata={"help": "Batch size for evaluation."} )
_a = field(default=0.1,metadata={"help": "Value of weight decay."} )
_a = field(
default=1_0_0_0_0,metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    _a = field(default=2e-4,metadata={"help": "Learning rate for training."} )
_a = field(default="cosine",metadata={"help": "Learning rate."} )
_a = field(
default=7_5_0,metadata={"help": "Number of warmup steps in the learning rate schedule."} )
_a = field(
default=1_6,metadata={"help": "Number of gradient accumulation steps."} )
_a = field(
default=UpperCamelCase__,metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
_a = field(default=5_0_0_0_0,metadata={"help": "Maximum number of training steps."} )
_a = field(
default=-1,metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_a = field(default=1_0_2_4,metadata={"help": "Sequence lengths used for training."} )
_a = field(default=1,metadata={"help": "Training seed."} )
_a = field(
default=1_0_2_4,metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "States path if the training should continue from a checkpoint folder."} )
_a = field(default=UpperCamelCase__,metadata={"help": "If True the data is pretokenized."} )
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Model name or path of model to be evaluated."} )
_a = field(
default="codeparrot/codeparrot-clean-valid",metadata={"help": "Name or path of validation dataset."} )
_a = field(default=2,metadata={"help": "Batch size used for evaluation."} )
_a = field(
default=-1,metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_a = field(default=1_0_2_4,metadata={"help": "Length of sequences to be evaluated."} )
_a = field(default=1,metadata={"help": "Random seed used for evaluation."} )
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Model name or path of model to be evaluated."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Number of workers used for code evaluation."} )
_a = field(
default=UpperCamelCase__,metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Sample from the language model's output distribution."} )
_a = field(default=0.2,metadata={"help": "Sampling temperature used for generation."} )
_a = field(default=2_5_6,metadata={"help": "Maximum number of newly generated tokens."} )
_a = field(default=0,metadata={"help": "Top-k parameter used for generation."} )
_a = field(default=0.95,metadata={"help": "Top-p parameter used for nucleus sampling."} )
_a = field(default=1_0,metadata={"help": "Number of generations to run in parallel."} )
_a = field(
default=2_0_0,metadata={"help": "Number of completions to generate for each sample."} )
_a = field(default=1,metadata={"help": "Random seed used for evaluation."} )
_a = field(
default="eval_results.json",metadata={"help": "Random seed used for evaluation."} )
_a = field(
default="0",metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
_a = field(
default=-1,metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
},)
@dataclass
class lowercase :
_a = field(
default=UpperCamelCase__,metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
},)
_a = field(
default="transformersbook/codeparrot",metadata={"help": "Folder or name of dataset to process."} )
_a = field(
default="codeparrot-clean",metadata={"help": "Folder to save processed processed dataset."} )
_a = field(
default=1_0_0_0_0_0,metadata={"help": "Number of files to save per JSON output file."} )
_a = field(default="content",metadata={"help": "Column containing text data to process."} )
_a = field(
default=1_0_0_0,metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
_a = field(
default=1_0_0,metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
_a = field(
default=0.25,metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
_a = field(
default=1.5,metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
_a = field(
default=0.7,metadata={"help": "Probability for filtering config, test and uncommon files."} )
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Name or path to the tokenizer."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "If True, near-duplicate samples are removed."} )
_a = field(
default=0.85,metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class lowercase :
_a = field(
default="gpt2",metadata={"help": "Base tokenizer to build new tokenizer from."} )
_a = field(
default="transformersbook/codeparrot-train",metadata={"help": "Dataset to train tokenizer on."} )
_a = field(default="content",metadata={"help": "Column containing text data to process."} )
_a = field(default=2_0_0_0_0_0,metadata={"help": "Number of examples to train tokenizer on."} )
_a = field(
        default=3_2_7_6_8,metadata={"help": "Vocabulary size of the new tokenizer."} )
_a = field(default="codeparrot",metadata={"help": "Name of new tokenizer."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Name or path to the tokenizer."} )
_a = field(
default="codeparrot/codeparrot-clean-train",metadata={"help": "Name or path to the dataset to pretokenize."} )
_a = field(
default="tokenized-codeparrot-train",metadata={"help": "Repo name of the pretokenized data."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class lowercase :
_a = field(
default="gpt2-large",metadata={"help": "Configuration to use for model initialization."} )
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Tokenizer attached to model."} )
_a = field(default="codeparrot",metadata={"help": "Name of the created model."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Push saved tokenizer to the hub."} )
| 704
|
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_a , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_a , _a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 0
|
import sys
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = len(snake_case_ )
_A : int = [[0 for x in range(snake_case_ )] for x in range(snake_case_ )]
_A : Union[str, Any] = [[0 for x in range(snake_case_ )] for x in range(snake_case_ )]
for chain_length in range(2,snake_case_ ):
for a in range(1,n - chain_length + 1 ):
_A : str = a + chain_length - 1
_A : Optional[int] = sys.maxsize
for c in range(snake_case_,snake_case_ ):
_A : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_A : List[Any] = cost
_A : int = c
return matrix, sol
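# The dynamic program above returns two tables: the first holds, at [a][b], the minimum
# number of scalar multiplications needed to compute the chain product A_a ... A_b
# (matrix i having dimensions array[i - 1] x array[i]); the second records the split
# point c that achieves that minimum. The recurrence used in the loop is
#   cost[a][b] = min over a <= c < b of
#                cost[a][c] + cost[c + 1][b] + array[a - 1] * array[c] * array[b]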
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if i == j:
print("""A""" + str(snake_case_ ),end=""" """ )
else:
print("""(""",end=""" """ )
print_optiomal_solution(snake_case_,snake_case_,optimal_solution[i][j] )
print_optiomal_solution(snake_case_,optimal_solution[i][j] + 1,snake_case_ )
print(""")""",end=""" """ )
def lowerCAmelCase_ ( ):
_A : List[Any] = [30, 35, 15, 5, 10, 20, 25]
_A : List[str] = len(snake_case_ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_A : Union[str, Any] = matrix_chain_order(snake_case_ )
print("""No. of Operation required: """ + str(matrix[1][n - 1] ) )
print_optiomal_solution(snake_case_,1,n - 1 )
if __name__ == "__main__":
main()
| 705
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 54
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> int:
_A : Any = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_A : Optional[int] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
_A : int = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
_A : Union[str, Any] = tf_top_k_top_p_filtering(_a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
_A : Tuple = output[output != -float("""inf""" )]
_A : List[Any] = tf.cast(
tf.where(tf.not_equal(_a , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_a , _a , rtol=1e-12 )
tf.debugging.assert_equal(_a , _a )
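    # Rough intent of the assertions above: tf_top_k_top_p_filtering keeps only the logits
    # allowed by both the top-k and nucleus (top-p) constraints (never fewer than
    # min_tokens_to_keep per row) and sets every other position to -inf, which is what the
    # expected index/value tensors annotated above encode.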
@require_tf
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
_a = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def a__ ( self ) -> List[Any]:
# TF-only test: tf.saved_model export
_A : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_A : int = 2
_A : int = 2
class lowercase ( tf.Module ):
def __init__( self , _a ) -> List[str]:
super(_a , self ).__init__()
_A : int = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_a , )
def a__ ( self , _a , _a ) -> Any:
_A : Any = self.model.generate(
input_ids=_a , attention_mask=_a , max_new_tokens=_a , return_dict_in_generate=_a , )
return {"sequences": outputs["sequences"]}
_A : int = [[2, 0], [102, 103]]
_A : Any = [[1, 0], [1, 1]]
_A : Optional[int] = DummyModel(model=_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_a , _a , signatures={"""serving_default""": dummy_model.serving} )
_A : Union[str, Any] = tf.saved_model.load(_a ).signatures["""serving_default"""]
for batch_size in range(1 , len(_a ) + 1 ):
_A : List[Any] = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
_A : List[Any] = serving_func(**_a )["""sequences"""]
_A : List[Any] = test_model.generate(**_a , max_new_tokens=_a )
tf.debugging.assert_equal(_a , _a )
@slow
def a__ ( self ) -> str:
# TF-only test: tf.saved_model export
_A : Tuple = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_A : Union[str, Any] = 1
_A : str = 2
class lowercase ( tf.Module ):
def __init__( self , _a ) -> str:
super(_a , self ).__init__()
_A : Optional[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_a , )
def a__ ( self , _a , _a ) -> Any:
_A : Tuple = self.model.generate(
input_ids=_a , attention_mask=_a , max_new_tokens=_a , return_dict_in_generate=_a , )
return {"sequences": outputs["sequences"]}
_A : Optional[Any] = [[2], [102, 103]]
_A : List[Any] = [[1], [1, 1]]
_A : List[Any] = DummyModel(model=_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_a , _a , signatures={"""serving_default""": dummy_model.serving} )
_A : Optional[int] = tf.saved_model.load(_a ).signatures["""serving_default"""]
for input_row in range(len(_a ) ):
_A : List[str] = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
_A : int = serving_func(**_a )["""sequences"""]
_A : str = test_model.generate(**_a , max_new_tokens=_a )
tf.debugging.assert_equal(_a , _a )
@slow
@require_tensorflow_text
def a__ ( self ) -> Tuple:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=_a )
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ) -> Tuple:
super().__init__()
_A : str = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_a , """spiece.model""" ) , """rb""" ).read() )
_A : str = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def a__ ( self , _a , *_a , **_a ) -> str:
_A : Any = self.tokenizer.tokenize(_a )
_A : Any = text.pad_model_inputs(
_a , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
_A : Any = self.model.generate(input_ids=_a , attention_mask=_a )
return self.tokenizer.detokenize(_a )
_A : Optional[int] = CompleteSentenceTransformer()
_A : Dict = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
_A : List[str] = complete_model(_a )
_A : List[Any] = tf.keras.Model(_a , _a )
keras_model.save(_a )
def a__ ( self ) -> Optional[Any]:
# Has PT equivalent: this test relies on random sampling
_A : Optional[int] = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
_A : Any = 14
_A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_A : str = """Hello, my dog is cute and"""
_A : Optional[Any] = tokenizer(_a , return_tensors="""tf""" )
_A : List[str] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_A : Optional[Any] = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
_A : Dict = model.generate(**_a , eos_token_id=_a , **_a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
_A : int = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
_A : int = model.generate(**_a , eos_token_id=_a , **_a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def a__ ( self ) -> Optional[Any]:
# Has PT equivalent: ample use of framework-specific code
_A : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
_A : Tuple = """Hugging Face is a technology company based in New York and Paris."""
_A : Union[str, Any] = bart_tokenizer(_a , return_tensors="""tf""" ).input_ids
_A : Dict = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
_A : Optional[Any] = bart_model.generate(_a ).numpy()
class lowercase ( UpperCamelCase__ ):
def a__ ( self , _a , _a=None , **_a ) -> Dict:
return super().call(_a , **_a )
_A : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
_A : List[str] = bart_model.generate(_a , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(_a , _a ) )
class lowercase ( bart_model.model.encoder.__class__ ):
def a__ ( self , _a , **_a ) -> Union[str, Any]:
return super().call(_a , **_a )
_A : Optional[int] = FakeEncoder(bart_model.config , bart_model.model.shared )
_A : Optional[int] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_A : Optional[int] = bart_model.generate(_a ).numpy()
with self.assertRaises(_a ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_a , foo="""bar""" )
| 706
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( snake_case_ ):
return data[1:] + data[0]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = int("""0b""" + data[0] + data[-1],2 )
_A : Any = int("""0b""" + data[1:3],2 )
return bin(s[row][col] )[2:]
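# S-box lookup: the outer bits (first and last) of the 4-bit input select the row and the
# middle two bits select the column; the chosen S-box entry is returned as a binary string
# without the "0b" prefix.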
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[str] = message[:4]
_A : List[Any] = message[4:]
_A : Union[str, Any] = apply_table(snake_case_,snake_case_ )
_A : List[Any] = xor(snake_case_,snake_case_ )
_A : Optional[Any] = apply_sbox(snake_case_,temp[:4] ) # noqa: E741
_A : List[Any] = apply_sbox(snake_case_,temp[4:] )
_A : int = """0""" * (2 - len(snake_case_ )) + l # noqa: E741
_A : Union[str, Any] = """0""" * (2 - len(snake_case_ )) + r
_A : List[Any] = apply_table(l + r,snake_case_ )
_A : Any = xor(snake_case_,snake_case_ )
return temp + right
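# One round of the S-DES Feistel function: the right half of the message is expanded and
# permuted, XOR-ed with the round key, run through the two S-boxes, permuted again with
# the 4-bit permutation table, and finally XOR-ed into the left half; the new left half
# concatenated with the unchanged right half is returned.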
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
| 0
|
import math
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if (
not isinstance(snake_case_,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if (
not isinstance(snake_case_,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
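# Worked example (illustrative values): for an apparent power of 100 VA and a power
# factor of 0.8, the real power is 100 * 0.8 = 80 W and the reactive power is
# 100 * sqrt(1 - 0.8 ** 2) = 60 VAR.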
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 54
| 0
|
class lowercase :
def __init__( self ) -> Optional[Any]:
_A : int = 0
_A : Any = 0
_A : Union[str, Any] = {}
def a__ ( self , _a ) -> int:
if vertex not in self.adjacency:
_A : Any = {}
self.num_vertices += 1
def a__ ( self , _a , _a , _a ) -> str:
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
_A : Optional[int] = weight
_A : int = weight
def a__ ( self ) -> List[str]:
_A : List[str] = self.get_edges()
for edge in edges:
_A : Tuple = edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
_A : Optional[int] = list(edges[i] )
edges.sort(key=lambda _a : e[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_A : Optional[Any] = edges[i][2] + 1
for edge in edges:
_A : Any = edge
_A : Any = weight
_A : Optional[Any] = weight
def __str__( self ) -> Optional[int]:
_A : Any = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_A : str = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def a__ ( self ) -> Tuple:
_A : str = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ ( self ) -> str:
return self.adjacency.keys()
@staticmethod
def a__ ( _a=None , _a=None ) -> Any:
_A : str = Graph()
if vertices is None:
_A : List[Any] = []
if edges is None:
_A : Optional[int] = []
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class lowercase :
def __init__( self ) -> Optional[Any]:
_A : int = {}
_A : List[str] = {}
def __len__( self ) -> Optional[int]:
return len(self.parent )
def a__ ( self , _a ) -> Union[str, Any]:
if item in self.parent:
return self.find(_a )
_A : Union[str, Any] = item
_A : Any = 0
return item
def a__ ( self , _a ) -> Tuple:
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
_A : Tuple = self.find(self.parent[item] )
return self.parent[item]
def a__ ( self , _a , _a ) -> Optional[int]:
_A : int = self.find(_a )
_A : Any = self.find(_a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_A : Optional[int] = roota
return roota
if self.rank[roota] < self.rank[roota]:
_A : Optional[int] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_A : Optional[int] = roota
return roota
return None
@staticmethod
def a__ ( _a ) -> int:
_A : List[Any] = graph.num_vertices
_A : Optional[int] = Graph.UnionFind()
_A : List[str] = []
while num_components > 1:
_A : Union[str, Any] = {}
for vertex in graph.get_vertices():
_A : List[Any] = -1
_A : List[str] = graph.get_edges()
for edge in edges:
_A : Any = edge
edges.remove((tail, head, weight) )
for edge in edges:
_A : int = edge
_A : Tuple = union_find.find(_a )
_A : str = union_find.find(_a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_A : Optional[Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_A : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_A : Union[str, Any] = cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
_A : Any = num_components - 1
_A : List[str] = Graph.build(edges=_a )
return mst
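# Boruvka's algorithm, as implemented above: while more than one component remains, find
# for every component the cheapest edge leaving it, add those edges to the spanning tree,
# and merge the endpoints' components in the union-find structure; each pass at least
# halves the number of remaining components.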
| 708
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a=None , _a=None , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A : Optional[Any] = self.model.config
else:
_A : int = config
_A : Optional[Any] = data_args
_A : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : Union[str, Any] = label_smoothed_nll_loss
def a__ ( self , _a ) -> int:
if self.optimizer is None:
_A : List[str] = ["""bias""", """LayerNorm.weight"""]
_A : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_A : Optional[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_A : Dict = Adafactor
_A : int = {"""scale_parameter""": False, """relative_step""": False}
else:
_A : int = AdamW
_A : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_A : List[str] = self.args.learning_rate
if self.sharded_ddp:
_A : List[str] = OSS(
params=_a , optim=_a , **_a , )
else:
_A : Tuple = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_A : Union[str, Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a__ ( self , _a ) -> Dict:
_A : List[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
        # If the PAD token is not defined, at least the EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
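# The helper above is meant to right-pad a tensor with the pad (or, failing that, eos)
# token id up to `max_length`, so that generated sequences and label tensors of different
# lengths can be compared position by position during evaluation.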
| 54
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( UpperCamelCase__ ):
_a = 4_2
_a = 4_2
def __init__( self , _a , _a ) -> int:
super().__init__()
self.register_modules(unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = 1 , _a = 50 , _a = None , _a = "pil" , _a = True , **_a , ) -> Union[Tuple, ImagePipelineOutput]:
_A : int = self.unet.config.sample_size
_A : Any = (batch_size, 3, img_size, img_size)
_A : Optional[Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_A : int = randn_tensor(_a , generator=_a , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_a )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_A : int = self.scheduler.schedule[t]
_A : int = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_A : Any = self.scheduler.add_noise_to_input(_a , _a , generator=_a )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_A : Tuple = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_A : Tuple = self.scheduler.step(_a , _a , _a , _a )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_A : Dict = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_A : str = self.scheduler.step_correct(
_a , _a , _a , _a , step_output.prev_sample , step_output["""derivative"""] , )
_A : str = step_output.prev_sample
_A : Tuple = (sample / 2 + 0.5).clamp(0 , 1 )
_A : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A : int = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 709
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(snake_case_,snake_case_ ) -> bool:
_A : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A : Optional[int] = mean(
int(is_in_circle(uniform(-1.0,1.0 ),uniform(-1.0,1.0 ) ) )
for _ in range(snake_case_ ) )
# The ratio of the area for circle to square is pi/4.
_A : List[str] = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
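# The dots are drawn uniformly from the square [-1, 1] x [-1, 1], which has area 4, while
# the unit circle inside it has area pi, so the fraction of dots landing in the circle
# approximates pi / 4; multiplying that fraction by 4 gives the estimate printed above.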
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0.0,snake_case_ = 1.0,):
return mean(
function_to_integrate(uniform(snake_case_,snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = 1.0 ):
def identity_function(snake_case_ ) -> float:
return x
_A : Any = area_under_curve_estimator(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def lowerCAmelCase_ ( snake_case_ ):
def function_to_integrate(snake_case_ ) -> float:
return sqrt(4.0 - x * x )
_A : Optional[int] = area_under_curve_estimator(
snake_case_,snake_case_,0.0,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 4_2
_a = 4_2
_a = None
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = 2
@register_to_config
def __init__( self , _a = 0.02 , _a = 100 , _a = 1.007 , _a = 80 , _a = 0.05 , _a = 50 , ) -> str:
# standard deviation of the initial noise distribution
_A : Tuple = sigma_max
# setable values
_A : int = None
_A : np.IntTensor = None
_A : torch.FloatTensor = None # sigma(t_i)
def a__ ( self , _a , _a = None ) -> torch.FloatTensor:
return sample
def a__ ( self , _a , _a = None ) -> int:
_A : List[Any] = num_inference_steps
_A : int = np.arange(0 , self.num_inference_steps )[::-1].copy()
_A : Tuple = torch.from_numpy(_a ).to(_a )
_A : Tuple = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_A : List[str] = torch.tensor(_a , dtype=torch.floataa , device=_a )
def a__ ( self , _a , _a , _a = None ) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
_A : Tuple = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_A : str = 0
# sample eps ~ N(0, S_noise^2 * I)
_A : Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=_a ).to(sample.device )
_A : Union[str, Any] = sigma + gamma * sigma
_A : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def a__ ( self , _a , _a , _a , _a , _a = True , ) -> Union[KarrasVeOutput, Tuple]:
_A : Any = sample_hat + sigma_hat * model_output
_A : Optional[Any] = (sample_hat - pred_original_sample) / sigma_hat
_A : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_a , derivative=_a , pred_original_sample=_a )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a = True , ) -> Union[KarrasVeOutput, Tuple]:
_A : Union[str, Any] = sample_prev + sigma_prev * model_output
_A : List[str] = (sample_prev - pred_original_sample) / sigma_prev
_A : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_a , derivative=_a , pred_original_sample=_a )
def a__ ( self , _a , _a , _a ) -> Dict:
raise NotImplementedError()
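# Informal summary of the sampler steps above (method names as in the original diffusers
# KarrasVeScheduler): add_noise_to_input "churns" the sample by raising the noise level
# from sigma to sigma_hat = sigma + gamma * sigma and adding matching Gaussian noise;
# step takes a first-order Euler step from sigma_hat down to sigma_prev; step_correct
# applies a second-order (Heun-style) correction by averaging the derivative at sigma_hat
# with the one re-evaluated at sigma_prev.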
| 710
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
| 54
| 0
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = test_results.split(""" """ )
_A : Optional[int] = 0
_A : Tuple = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_A : int = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(snake_case_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
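# Editorial, hedged worked example (the stats-line format is assumed from pytest's
# summary): for "== 3 failed, 120 passed in 214.56s ==" the helper above -- invoked
# later in this script as handle_test_results -- walks the space-separated tokens,
# adds the number preceding "failed"/"passed", and keeps the token just before the
# trailing "==" as the time spent, returning (3, 120, "214.56s").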
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = {}
_A : Optional[Any] = None
_A : Union[str, Any] = False
for line in failures_short_lines.split("""\n""" ):
if re.search(r"""_ \[doctest\]""",snake_case_ ):
_A : int = True
_A : Optional[int] = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_A : int = line
_A : Dict = False
return failures
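# Editorial, hedged example of the expected "failures short" input (format assumed
# from pytest's doctest reports):
#     ____________ [doctest] transformers.models.bert.modeling_bert.BertModel.forward ____________
#     103     >>> outputs = model(**inputs)
#     UNEXPECTED EXCEPTION: OSError(...)
# The helper above keys the result on the dotted test path taken from the header line
# and stores the first following line that does not start with a line number, e.g.
# {"transformers.models.bert.modeling_bert.BertModel.forward": "UNEXPECTED EXCEPTION: OSError(...)"}.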
class lowercase :
def __init__( self , _a , _a ) -> str:
_A : Dict = title
_A : Dict = doc_test_results["""time_spent"""].split(""",""" )[0]
_A : Optional[Any] = doc_test_results["""success"""]
_A : List[Any] = doc_test_results["""failures"""]
_A : Any = self.n_success + self.n_failures
# Failures and success of the modeling tests
_A : List[str] = doc_test_results
@property
def a__ ( self ) -> str:
_A : str = [self._time_spent]
_A : List[Any] = 0
for time in time_spent:
_A : Optional[int] = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_a ) == 1:
_A : List[str] = [0, 0, time_parts[0]]
_A : Any = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
_A : Optional[Any] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'''{int(_a )}h{int(_a )}m{int(_a )}s'''
@property
def a__ ( self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def a__ ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def a__ ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def a__ ( self ) -> Dict:
_A : Tuple = 40
_A : Optional[Any] = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(_a , _a )}
_A : Tuple = """"""
for category, failures in category_failures.items():
if len(_a ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def a__ ( self ) -> str:
_A : Tuple = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_a )
@staticmethod
def a__ ( ) -> List[str]:
_A : Tuple = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(_a )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=_a , )
def a__ ( self ) -> Dict:
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_A : Tuple = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
_A : List[str] = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=_a , )
def a__ ( self , _a , _a , _a , _a ) -> Tuple:
_A : Dict = """"""
for key, value in failures.items():
_A : List[str] = value[:200] + """ [Truncated]""" if len(_a ) > 250 else value
failures_text += F'''*{key}*\n_{value}_\n\n'''
_A : Any = job_name
_A : str = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_A : List[str] = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def a__ ( self ) -> str:
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_A : int = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
_A : Tuple = sorted(self.doc_test_results.items() , key=lambda _a : _a[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_A : Tuple = F'''*Num failures* :{len(job_result["failed"] )} \n'''
_A : Any = job_result["""failures"""]
_A : Tuple = self.get_reply_blocks(_a , _a , _a , text=_a )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F'''Results for {job}''' , blocks=_a , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def lowerCAmelCase_ ( ):
_A : Optional[int] = os.environ["""GITHUB_RUN_ID"""]
_A : Tuple = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
_A : Tuple = requests.get(snake_case_ ).json()
_A : str = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : List[str] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : Optional[Any] = requests.get(url + f'''&page={i + 2}''' ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""",snake_case_ )
return {}
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = {}
if os.path.exists(snake_case_ ):
_A : Optional[Any] = os.listdir(snake_case_ )
for file in files:
try:
with open(os.path.join(snake_case_,snake_case_ ),encoding="""utf-8""" ) as f:
_A : int = f.read()
except UnicodeDecodeError as e:
raise ValueError(f'''Could not open {os.path.join(snake_case_,snake_case_ )}.''' ) from e
return _artifact
def lowerCAmelCase_ ( ):
class lowercase :
def __init__( self , _a ) -> Optional[Any]:
_A : str = name
_A : Any = []
def __str__( self ) -> Optional[Any]:
return self.name
def a__ ( self , _a ) -> Tuple:
self.paths.append({"""name""": self.name, """path""": path} )
_A : Dict[str, Artifact] = {}
_A : Optional[int] = filter(os.path.isdir,os.listdir() )
for directory in directories:
_A : Optional[int] = directory
if artifact_name not in _available_artifacts:
_A : Optional[int] = Artifact(snake_case_ )
_available_artifacts[artifact_name].add_path(snake_case_ )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 711
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
self.assertTrue(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
| 54
| 0
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per task
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowerCAmelCase_ ( *snake_case_ ):
with open(snake_case_,"""r""" ) as fh:
fcntl.flock(snake_case_,fcntl.LOCK_EX )
try:
print(*snake_case_ )
finally:
fcntl.flock(snake_case_,fcntl.LOCK_UN )
_snake_case = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
_snake_case = torch.device("cuda", local_rank)
_snake_case = socket.gethostname()
_snake_case = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
_snake_case = dist.get_rank()
_snake_case = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 712
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 54
| 0
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=None ):
'''simple docstring'''
_A : Union[str, Any] = XLNetConfig.from_json_file(snake_case_ )
_A : List[str] = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
_A : str = finetuning_task
_A : int = GLUE_TASKS_NUM_LABELS[finetuning_task]
_A : int = XLNetForSequenceClassification(snake_case_ )
elif "squad" in finetuning_task:
_A : Optional[Any] = finetuning_task
_A : str = XLNetForQuestionAnswering(snake_case_ )
else:
_A : Optional[int] = XLNetLMHeadModel(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(snake_case_,snake_case_,snake_case_ )
# Save pytorch-model
_A : Union[str, Any] = os.path.join(snake_case_,snake_case_ )
_A : Any = os.path.join(snake_case_,snake_case_ )
print(f'''Save PyTorch model to {os.path.abspath(snake_case_ )}''' )
torch.save(model.state_dict(),snake_case_ )
print(f'''Save configuration file to {os.path.abspath(snake_case_ )}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
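# Editorial, hedged usage sketch: with this lazy layout the config can be imported
# without pulling in torch, and the modeling classes are only materialized when first
# accessed (assuming this file lives at transformers/models/time_series_transformer/__init__.py):
#
#     from transformers import TimeSeriesTransformerConfig      # lightweight, no torch needed
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#
#     from transformers import TimeSeriesTransformerModel       # resolved through _LazyModule
#     model = TimeSeriesTransformerModel(config)                 # torch is required from here on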
| 54
| 0
|
class lowercase :
def __init__( self ) -> None:
_A : dict[str, TrieNode] = {} # Mapping from char to TrieNode
_A : Optional[Any] = False
def a__ ( self , _a ) -> None:
for word in words:
self.insert(_a )
def a__ ( self , _a ) -> None:
_A : Optional[Any] = self
for char in word:
if char not in curr.nodes:
_A : Optional[Any] = TrieNode()
_A : List[Any] = curr.nodes[char]
_A : Optional[int] = True
def a__ ( self , _a ) -> bool:
_A : Union[str, Any] = self
for char in word:
if char not in curr.nodes:
return False
_A : str = curr.nodes[char]
return curr.is_leaf
def a__ ( self , _a ) -> None:
def _delete(_a , _a , _a ) -> bool:
if index == len(_a ):
# If word does not exist
if not curr.is_leaf:
return False
_A : Any = False
return len(curr.nodes ) == 0
_A : List[Any] = word[index]
_A : Optional[Any] = curr.nodes.get(_a )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_A : List[str] = _delete(_a , _a , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _a , 0 )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if node.is_leaf:
print(snake_case_,end=""" """ )
for key, value in node.nodes.items():
print_words(snake_case_,word + key )
def lowerCAmelCase_ ( ):
_A : int = """banana bananas bandana band apple all beast""".split()
_A : Optional[int] = TrieNode()
root.insert_many(snake_case_ )
# print_words(root, "")
assert all(root.find(snake_case_ ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
print(str(snake_case_ ),"""works!""" if passes else """doesn't work :(""" )
def lowerCAmelCase_ ( ):
assert test_trie()
def lowerCAmelCase_ ( ):
print_results("""Testing trie functionality""",test_trie() )
if __name__ == "__main__":
main()
| 714
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
# out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = 384
_A : List[Any] = 7
if "tiny" in model_name:
_A : str = 96
_A : List[Any] = (2, 2, 6, 2)
_A : Dict = (3, 6, 12, 24)
elif "small" in model_name:
_A : Tuple = 96
_A : str = (2, 2, 18, 2)
_A : Any = (3, 6, 12, 24)
elif "base" in model_name:
_A : Optional[int] = 128
_A : Optional[Any] = (2, 2, 18, 2)
_A : int = (4, 8, 16, 32)
_A : List[Any] = 12
_A : int = 512
elif "large" in model_name:
_A : Any = 192
_A : Any = (2, 2, 18, 2)
_A : Optional[Any] = (6, 12, 24, 48)
_A : Any = 12
_A : Tuple = 768
# set label information
_A : Any = 150
_A : Optional[int] = """huggingface/label-files"""
_A : Optional[Any] = """ade20k-id2label.json"""
_A : Dict = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
_A : str = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : Tuple = {v: k for k, v in idalabel.items()}
_A : Any = SwinConfig(
embed_dim=snake_case_,depths=snake_case_,num_heads=snake_case_,window_size=snake_case_,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""],)
_A : Any = UperNetConfig(
backbone_config=snake_case_,auxiliary_in_channels=snake_case_,num_labels=snake_case_,idalabel=snake_case_,labelaid=snake_case_,)
return config
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = dct.pop(snake_case_ )
_A : Union[str, Any] = val
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_A : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_A : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
_A : Optional[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_A : Optional[int] = in_proj_weight[:dim, :]
_A : Tuple = in_proj_bias[: dim]
_A : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
_A : Tuple = in_proj_bias[
dim : dim * 2
]
_A : int = in_proj_weight[
-dim :, :
]
_A : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = x.shape
_A : Optional[Any] = x.reshape(snake_case_,4,in_channel // 4 )
_A : Tuple = x[:, [0, 2, 1, 3], :].transpose(1,2 ).reshape(snake_case_,snake_case_ )
return x
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = x.shape
_A : str = x.reshape(snake_case_,in_channel // 4,4 )
_A : Any = x[:, :, [0, 2, 1, 3]].transpose(1,2 ).reshape(snake_case_,snake_case_ )
return x
def lowerCAmelCase_ ( snake_case_ ):
_A : Any = x.shape[0]
_A : List[Any] = x.reshape(4,in_channel // 4 )
_A : Dict = x[[0, 2, 1, 3], :].transpose(0,1 ).reshape(snake_case_ )
return x
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = x.shape[0]
_A : Union[str, Any] = x.reshape(in_channel // 4,4 )
_A : str = x[:, [0, 2, 1, 3]].transpose(0,1 ).reshape(snake_case_ )
return x
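# Editorial note (assumed rationale): these four helpers deal with Swin's patch-merging
# ("downsample") layers, which unfold each 2x2 window into four channel groups. The
# original checkpoint and the Hugging Face implementation lay those groups out in a
# different order, so the `reduction` weights and `norm` parameters are viewed as
# (..., 4, C/4) or (..., C/4, 4), the groups are permuted with index [0, 2, 1, 3], and
# the result is flattened back; the "reverse_" variants applied below undo the same
# reordering in the opposite direction.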
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
_A : Optional[int] = model_name_to_url[model_name]
_A : int = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""",file_name=snake_case_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(snake_case_,param.shape )
_A : Tuple = get_upernet_config(snake_case_ )
_A : Optional[Any] = UperNetForSemanticSegmentation(snake_case_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_A : Any = state_dict.pop(snake_case_ )
if "bn" in key:
_A : List[Any] = key.replace("""bn""","""batch_norm""" )
_A : List[Any] = val
# rename keys
_A : Tuple = create_rename_keys(snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_,snake_case_,snake_case_ )
read_in_q_k_v(snake_case_,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_A : Union[str, Any] = reverse_correct_unfold_reduction_order(snake_case_ )
if "norm" in key:
_A : str = reverse_correct_unfold_norm_order(snake_case_ )
model.load_state_dict(snake_case_ )
# verify on image
_A : Union[str, Any] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
_A : int = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ).convert("""RGB""" )
_A : int = SegformerImageProcessor()
_A : List[Any] = processor(snake_case_,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_A : Any = model(snake_case_ )
_A : str = outputs.logits
print(logits.shape )
print("""First values of logits:""",logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_A : List[Any] = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
elif model_name == "upernet-swin-small":
_A : Optional[Any] = torch.tensor(
[[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
elif model_name == "upernet-swin-base":
_A : Union[str, Any] = torch.tensor(
[[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
elif model_name == "upernet-swin-large":
_A : List[Any] = torch.tensor(
[[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
print("""Logits:""",outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3],snake_case_,atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 715
|
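# Counts the values of n below `limit` that admit exactly ten (first_term, common_difference) solutions
# under the arithmetic-progression constraints encoded below (cf. Project Euler problem 135).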
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, also 4d < a
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
| 0
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_snake_case : Any = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
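# Rewrites the original SAM checkpoint keys to the Hugging Face naming scheme and re-indexes the hypernetwork MLP layers.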
def lowerCAmelCase_ ( snake_case_ ):
_A : int = {}
state_dict.pop("""pixel_mean""",snake_case_ )
state_dict.pop("""pixel_std""",snake_case_ )
_A : Tuple = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_A : int = key.replace(snake_case_,snake_case_ )
if re.match(snake_case_,snake_case_ ):
_A : Optional[int] = int(re.match(snake_case_,snake_case_ ).group(2 ) )
if layer_nb == 0:
_A : Tuple = key.replace("""layers.0""","""proj_in""" )
elif layer_nb == 1:
_A : int = key.replace("""layers.1""","""layers.0""" )
elif layer_nb == 2:
_A : Union[str, Any] = key.replace("""layers.2""","""proj_out""" )
_A : Union[str, Any] = value
_A : Tuple = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
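# Downloads the original checkpoint from the Hub, builds the matching SamConfig, loads the remapped weights into SamModel
# and verifies a few IoU scores on GPU.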
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_="ybelkada/segment-anything" ):
_A : Union[str, Any] = hf_hub_download(snake_case_,f'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_A : Dict = SamConfig()
elif "sam_vit_l" in model_name:
_A : Optional[Any] = SamVisionConfig(
hidden_size=1024,num_hidden_layers=24,num_attention_heads=16,global_attn_indexes=[5, 11, 17, 23],)
_A : str = SamConfig(
vision_config=snake_case_,)
elif "sam_vit_h" in model_name:
_A : int = SamVisionConfig(
hidden_size=1280,num_hidden_layers=32,num_attention_heads=16,global_attn_indexes=[7, 15, 23, 31],)
_A : Optional[int] = SamConfig(
vision_config=snake_case_,)
_A : Union[str, Any] = torch.load(snake_case_,map_location="""cpu""" )
_A : Optional[Any] = replace_keys(snake_case_ )
_A : Union[str, Any] = SamImageProcessor()
_A : Any = SamProcessor(image_processor=snake_case_ )
_A : Optional[Any] = SamModel(snake_case_ )
hf_model.load_state_dict(snake_case_ )
_A : List[Any] = hf_model.to("""cuda""" )
_A : int = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
_A : int = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ).convert("""RGB""" )
_A : Dict = [[[400, 650]]]
_A : List[str] = [[1]]
_A : Dict = processor(images=np.array(snake_case_ ),return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
_A : Optional[int] = hf_model(**snake_case_ )
_A : int = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_A : int = processor(
images=np.array(snake_case_ ),input_points=snake_case_,input_labels=snake_case_,return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
_A : Optional[Any] = hf_model(**snake_case_ )
_A : Any = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_A : Optional[int] = ((75, 275, 1725, 850),)
_A : int = processor(images=np.array(snake_case_ ),input_boxes=snake_case_,return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
_A : List[str] = hf_model(**snake_case_ )
_A : Any = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_A : Dict = [[[400, 650], [800, 650]]]
_A : Union[str, Any] = [[1, 1]]
_A : List[str] = processor(
images=np.array(snake_case_ ),input_points=snake_case_,input_labels=snake_case_,return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
_A : Dict = hf_model(**snake_case_ )
_A : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
_snake_case : Any = argparse.ArgumentParser()
_snake_case : Optional[int] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
_snake_case : Any = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 716
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
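    # Tokenizes a batch of strings; when a pad token is configured, pads/truncates to max_length and returns input_ids plus attention_mask.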
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 0
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowercase ( unittest.TestCase ):
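    # Helper: uniform scores of shape (batch_size, length), used as a neutral starting distribution in the tests below.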
def a__ ( self , _a , _a ) -> str:
_A : List[str] = jnp.ones((batch_size, length) ) / length
return scores
def a__ ( self ) -> Optional[int]:
_A : List[Any] = None
_A : Union[str, Any] = 20
_A : List[str] = self._get_uniform_logits(batch_size=2 , length=_a )
# tweak scores to not be uniform anymore
_A : str = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_A : str = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_A : str = jax.nn.softmax(_a , axis=-1 )
_A : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_A : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
_A : Union[str, Any] = jax.nn.softmax(temp_dist_warper_sharper(_a , scores.copy() , cur_len=_a ) , axis=-1 )
_A : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(_a , scores.copy() , cur_len=_a ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def a__ ( self ) -> Tuple:
_A : int = None
_A : List[Any] = 10
_A : int = 2
# create ramp distribution
_A : Optional[int] = np.broadcast_to(np.arange(_a )[None, :] , (batch_size, vocab_size) ).copy()
_A : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_A : int = FlaxTopKLogitsWarper(3 )
_A : Optional[int] = top_k_warp(_a , _a , cur_len=_a )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_A : Optional[Any] = 5
_A : Any = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_A : Any = np.broadcast_to(np.arange(_a )[None, :] , (batch_size, length) ).copy()
_A : Dict = top_k_warp_safety_check(_a , _a , cur_len=_a )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = None
_A : Optional[int] = 10
_A : Optional[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_A : Union[str, Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_A : Any = FlaxTopPLogitsWarper(0.8 )
_A : Union[str, Any] = np.exp(top_p_warp(_a , _a , cur_len=_a ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_A : List[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# check edge cases with negative and extreme logits
_A : List[str] = np.broadcast_to(np.arange(_a )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_A : List[Any] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_A : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_A : int = top_p_warp(_a , _a , cur_len=_a )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def a__ ( self ) -> int:
_A : Tuple = 20
_A : str = 4
_A : Optional[Any] = 0
_A : str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_a )
# check that min length is applied at length 5
_A : Dict = ids_tensor((batch_size, 20) , vocab_size=20 )
_A : Optional[Any] = 5
_A : Tuple = self._get_uniform_logits(_a , _a )
_A : List[str] = min_dist_processor(_a , _a , cur_len=_a )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
_A : Optional[Any] = self._get_uniform_logits(_a , _a )
_A : Tuple = 15
_A : Optional[int] = min_dist_processor(_a , _a , cur_len=_a )
self.assertFalse(jnp.isinf(_a ).any() )
def a__ ( self ) -> Union[str, Any]:
_A : Optional[Any] = 20
_A : Any = 4
_A : str = 0
_A : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_a )
# check that all scores are -inf except the bos_token_id score
_A : Dict = ids_tensor((batch_size, 1) , vocab_size=20 )
_A : Tuple = 1
_A : List[Any] = self._get_uniform_logits(_a , _a )
_A : int = logits_processor(_a , _a , cur_len=_a )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_A : Any = 3
_A : Dict = self._get_uniform_logits(_a , _a )
_A : List[str] = logits_processor(_a , _a , cur_len=_a )
self.assertFalse(jnp.isinf(_a ).any() )
def a__ ( self ) -> Union[str, Any]:
_A : Optional[int] = 20
_A : Optional[int] = 4
_A : Any = 0
_A : Dict = 5
_A : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_a , eos_token_id=_a )
# check that all scores are -inf except the eos_token_id when max_length is reached
_A : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_A : Optional[Any] = 4
_A : str = self._get_uniform_logits(_a , _a )
_A : Any = logits_processor(_a , _a , cur_len=_a )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_A : Any = 3
_A : int = self._get_uniform_logits(_a , _a )
_A : List[str] = logits_processor(_a , _a , cur_len=_a )
self.assertFalse(jnp.isinf(_a ).any() )
def a__ ( self ) -> List[Any]:
_A : Dict = 4
_A : Tuple = 10
_A : int = 15
_A : int = 2
_A : Any = 1
_A : Tuple = 15
# dummy input_ids and scores
_A : List[Any] = ids_tensor((batch_size, sequence_length) , _a )
_A : Optional[int] = input_ids.copy()
_A : Union[str, Any] = self._get_uniform_logits(_a , _a )
_A : Optional[int] = scores.copy()
# instantiate all dist processors
_A : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
_A : List[str] = FlaxTopKLogitsWarper(3 )
_A : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_A : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_a )
_A : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_a )
_A : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=_a , eos_token_id=_a )
_A : Union[str, Any] = 10
# no processor list
_A : Optional[int] = temp_dist_warp(_a , _a , cur_len=_a )
_A : Tuple = top_k_warp(_a , _a , cur_len=_a )
_A : List[str] = top_p_warp(_a , _a , cur_len=_a )
_A : str = min_dist_proc(_a , _a , cur_len=_a )
_A : Union[str, Any] = bos_dist_proc(_a , _a , cur_len=_a )
_A : Optional[int] = eos_dist_proc(_a , _a , cur_len=_a )
# with processor list
_A : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_A : str = processor(_a , _a , cur_len=_a )
# scores should be equal
self.assertTrue(jnp.allclose(_a , _a , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def a__ ( self ) -> str:
_A : int = 4
_A : Tuple = 10
_A : Optional[Any] = 15
_A : str = 2
_A : str = 1
_A : Dict = 15
# dummy input_ids and scores
_A : int = ids_tensor((batch_size, sequence_length) , _a )
_A : List[Any] = input_ids.copy()
_A : int = self._get_uniform_logits(_a , _a )
_A : Optional[int] = scores.copy()
# instantiate all dist processors
_A : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
_A : Any = FlaxTopKLogitsWarper(3 )
_A : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_A : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_a )
_A : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_a )
_A : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_a , eos_token_id=_a )
_A : List[str] = 10
# no processor list
def run_no_processor_list(_a , _a , _a ):
_A : List[str] = temp_dist_warp(_a , _a , cur_len=_a )
_A : Any = top_k_warp(_a , _a , cur_len=_a )
_A : str = top_p_warp(_a , _a , cur_len=_a )
_A : Optional[Any] = min_dist_proc(_a , _a , cur_len=_a )
_A : Optional[int] = bos_dist_proc(_a , _a , cur_len=_a )
_A : List[str] = eos_dist_proc(_a , _a , cur_len=_a )
return scores
# with processor list
def run_processor_list(_a , _a , _a ):
_A : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_A : Optional[Any] = processor(_a , _a , cur_len=_a )
return scores
_A : int = jax.jit(_a )
_A : str = jax.jit(_a )
_A : Tuple = jitted_run_no_processor_list(_a , _a , _a )
_A : str = jitted_run_processor_list(_a , _a , _a )
# scores should be equal
self.assertTrue(jnp.allclose(_a , _a , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 717
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=None , ) -> Dict:
_A : List[Any] = size if size is not None else {"""shortest_edge""": 20}
_A : List[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_A : List[Any] = parent
_A : Optional[int] = batch_size
_A : Union[str, Any] = num_channels
_A : Tuple = image_size
_A : Optional[Any] = min_resolution
_A : int = max_resolution
_A : List[str] = do_resize
_A : Optional[Any] = size
_A : List[str] = do_center_crop
_A : Tuple = crop_size
def a__ ( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = MobileNetVaImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Dict = MobileNetVaImageProcessingTester(self )
@property
def a__ ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> str:
_A : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """do_center_crop""" ) )
self.assertTrue(hasattr(_a , """crop_size""" ) )
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def a__ ( self ) -> List[str]:
pass
def a__ ( self ) -> str:
# Initialize image_processing
_A : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : Union[str, Any] = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def a__ ( self ) -> int:
# Initialize image_processing
_A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : Union[str, Any] = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 718
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 0
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
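# Closes open issues whose last comment came from the bot and that have gone quiet, or posts a stale warning otherwise;
# issues carrying any of the exempt labels above are skipped.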
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda snake_case_ : i.created_at,reverse=snake_case_ )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 719
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 54
| 0
|
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_snake_case = logging.getLogger(__name__)
_snake_case = tf.data.AUTOTUNE
def lowerCAmelCase_ ( ):
_A : Tuple = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""",type=snake_case_,default="""roberta-base""",help="""The model config to use. Note that we don't copy the model's weights, only the config!""",)
parser.add_argument(
"""--tokenizer""",type=snake_case_,default="""unigram-tokenizer-wikitext""",help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""",)
parser.add_argument(
"""--per_replica_batch_size""",type=snake_case_,default=8,help="""Batch size per TPU core.""",)
parser.add_argument(
"""--no_tpu""",action="""store_true""",help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""",)
parser.add_argument(
"""--tpu_name""",type=snake_case_,help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""",default="""local""",)
parser.add_argument(
"""--tpu_zone""",type=snake_case_,help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""",)
parser.add_argument(
"""--gcp_project""",type=snake_case_,help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""",action="""store_true""",help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""",)
parser.add_argument(
"""--train_dataset""",type=snake_case_,help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""",)
parser.add_argument(
"""--shuffle_buffer_size""",type=snake_case_,default=2**18,help="""Size of the shuffle buffer (in samples)""",)
parser.add_argument(
"""--eval_dataset""",type=snake_case_,help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""",)
parser.add_argument(
"""--num_epochs""",type=snake_case_,default=1,help="""Number of epochs to train for.""",)
parser.add_argument(
"""--learning_rate""",type=snake_case_,default=1e-4,help="""Learning rate to use for training.""",)
parser.add_argument(
"""--weight_decay_rate""",type=snake_case_,default=1e-3,help="""Weight decay rate to use for training.""",)
parser.add_argument(
"""--max_length""",type=snake_case_,default=512,help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""",)
parser.add_argument(
"""--mlm_probability""",type=snake_case_,default=0.15,help="""Fraction of tokens to mask during training.""",)
parser.add_argument("""--output_dir""",type=snake_case_,required=snake_case_,help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""",type=snake_case_,help="""Model ID to upload to on the Hugging Face Hub.""" )
_A : Any = parser.parse_args()
return args
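# Resolves the TPU cluster (by name or automatically), connects to it and initializes the TPU system; raises if none can be found.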
def lowerCAmelCase_ ( snake_case_ ):
try:
if args.tpu_name:
_A : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name,zone=args.tpu_zone,project=args.gcp_project )
else:
_A : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(snake_case_ )
tf.tpu.experimental.initialize_tpu_system(snake_case_ )
return tpu
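# Infers the total number of samples from the shard file names, which end in -<shard>-<num_samples>.tfrecord.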
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = 0
for file in file_list:
_A : Optional[Any] = file.split("""/""" )[-1]
_A : List[Any] = re.search(r"""-\d+-(\d+)\.tfrecord""",snake_case_ ).group(1 )
_A : Dict = int(snake_case_ )
num_samples += sample_count
return num_samples
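# Builds the tf.data pipeline: optionally shuffle the shards, decode and mask the records, then batch and prefetch.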
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_=None ):
_A : Tuple = count_samples(snake_case_ )
_A : List[str] = tf.data.Dataset.from_tensor_slices(snake_case_ )
if shuffle:
_A : Union[str, Any] = dataset.shuffle(len(snake_case_ ) )
_A : Dict = tf.data.TFRecordDataset(snake_case_,num_parallel_reads=snake_case_ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
_A : Union[str, Any] = dataset.apply(tf.data.experimental.assert_cardinality(snake_case_ ) )
_A : int = dataset.map(snake_case_,num_parallel_calls=snake_case_ )
if shuffle:
assert shuffle_buffer_size is not None
_A : int = dataset.shuffle(args.shuffle_buffer_size )
_A : str = dataset.batch(snake_case_,drop_remainder=snake_case_ )
_A : Tuple = dataset.map(snake_case_,num_parallel_calls=snake_case_ )
_A : Any = dataset.prefetch(snake_case_ )
return dataset
def lowerCAmelCase_ ( snake_case_ ):
if not args.no_tpu:
_A : Dict = initialize_tpu(snake_case_ )
_A : str = tf.distribute.TPUStrategy(snake_case_ )
else:
_A : Optional[int] = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
_A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer )
_A : int = AutoConfig.from_pretrained(args.pretrained_model_config )
_A : int = tokenizer.vocab_size
_A : Tuple = tf.io.gfile.glob(os.path.join(args.train_dataset,"""*.tfrecord""" ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
_A : Dict = tf.io.gfile.glob(os.path.join(args.eval_dataset,"""*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
_A : Optional[Any] = count_samples(snake_case_ )
_A : List[str] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
_A : Any = steps_per_epoch * args.num_epochs
with strategy.scope():
_A : List[str] = TFAutoModelForMaskedLM.from_config(snake_case_ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
_A : List[str] = create_optimizer(
num_train_steps=snake_case_,num_warmup_steps=total_train_steps // 20,init_lr=args.learning_rate,weight_decay_rate=args.weight_decay_rate,)
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=snake_case_,metrics=["""accuracy"""] )
def decode_fn(snake_case_ ):
_A : Union[str, Any] = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa,shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa,shape=(args.max_length,) ),
}
return tf.io.parse_single_example(snake_case_,snake_case_ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
_A : List[Any] = DataCollatorForLanguageModeling(
tokenizer=snake_case_,mlm_probability=args.mlm_probability,mlm=snake_case_,return_tensors="""tf""" )
def mask_with_collator(snake_case_ ):
# TF really needs an isin() function
_A : Tuple = (
~tf.cast(batch["""attention_mask"""],tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
_A : str = data_collator.tf_mask_tokens(
batch["""input_ids"""],vocab_size=len(snake_case_ ),mask_token_id=tokenizer.mask_token_id,special_tokens_mask=snake_case_,)
return batch
_A : List[str] = args.per_replica_batch_size * strategy.num_replicas_in_sync
_A : Optional[int] = prepare_dataset(
snake_case_,decode_fn=snake_case_,mask_fn=snake_case_,batch_size=snake_case_,shuffle=snake_case_,shuffle_buffer_size=args.shuffle_buffer_size,)
_A : int = prepare_dataset(
snake_case_,decode_fn=snake_case_,mask_fn=snake_case_,batch_size=snake_case_,shuffle=snake_case_,)
_A : Optional[int] = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir,hub_model_id=args.hub_model_id,tokenizer=snake_case_ ) )
model.fit(
snake_case_,validation_data=snake_case_,epochs=args.num_epochs,callbacks=snake_case_,)
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
_snake_case = parse_args()
main(args)
| 720
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
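# Quine-McCluskey utilities. The helper below is meant to merge two implicant strings that differ in exactly one
# bit (replacing that bit with '_') and to return False when they differ in more than one position.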
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = list(snake_case_ )
_A : List[Any] = list(snake_case_ )
_A : Tuple = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
while True:
_A : int = ["""$"""] * len(snake_case_ )
_A : Any = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1,len(snake_case_ ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
_A : Dict = list(set(snake_case_ ) )
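# Converts each minterm to a fixed-width binary string with as many bits as there are variables.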
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = []
for minterm in minterms:
_A : Tuple = """"""
for _ in range(snake_case_ ):
_A : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = list(snake_case_ )
_A : Tuple = list(snake_case_ )
_A : Dict = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
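# Greedy cover selection: first take implicants that are the only cover for some minterm, then repeatedly take the
# implicant covering the most remaining minterms.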
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = []
_A : str = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
for i in range(len(snake_case_ ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
_A : Optional[int] = 0
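# Builds the coverage chart: entry [i][j] is 1 when prime implicant i covers binary minterm j.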
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i],binary[j],snake_case_ ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
float(snake_case_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_A : int = decimal_to_binary(snake_case_,snake_case_ )
_A : Optional[Any] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
_A : int = prime_implicant_chart(snake_case_,snake_case_ )
_A : int = selection(snake_case_,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 54
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 721
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(snake_case_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
                    # adds the word to every combination the current position holds;
                    # now, push that combination to table[i + len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
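# Illustrative trace (added for clarity, not part of the original module):
# for all_construct("ab", ["a", "b", "ab"]) the table evolves as
#   table[0] = [[]]  ->  table[1] = [["a"]]  ->  table[2] = [["ab"], ["b", "a"]]
# and after the final per-combination reversal the result is [["ab"], ["a", "b"]].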
| 54
| 0
|
import fire
from utils import calculate_rouge, save_json
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=None,**snake_case_ ):
_A : Dict = [x.strip() for x in open(snake_case_ ).readlines()]
_A : Optional[int] = [x.strip() for x in open(snake_case_ ).readlines()][: len(snake_case_ )]
_A : List[Any] = calculate_rouge(snake_case_,snake_case_,**snake_case_ )
if save_path is not None:
save_json(snake_case_,snake_case_,indent=snake_case_ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
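# Illustrative invocation (the script and file names below are placeholders, not
# taken from this file):
#   python calculate_rouge.py predictions.txt references.txt --save_path metrics.json
# This computes ROUGE between line-aligned prediction/reference files and, when
# --save_path is given, writes the scores to a JSON file via save_json.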
| 700
|
import operator
def lowerCAmelCase_ ( snake_case_,snake_case_ = False,snake_case_ = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
for i, item in enumerate(snake_case_ ):
if _operator(snake_case_,sublist[-1] ):
sublist.append(snake_case_ )
arr.pop(snake_case_ )
# merging sublist into solution list
if not solution:
solution.extend(snake_case_ )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(snake_case_ ):
if not _operator(snake_case_,snake_case_ ):
solution.insert(snake_case_,snake_case_ )
break
else:
solution.append(snake_case_ )
strand_sort(snake_case_,snake_case_,snake_case_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
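# How the strand extraction works (illustrative note, not part of the original code):
# each pass pops the first remaining element and greedily appends any later element
# that extends the increasing "strand"; the strand is then merged into the solution.
# For [4, 3, 5, 1, 2] the first pass extracts the strand [4, 5], leaving [3, 1, 2].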
| 54
| 0
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
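# Worked example (mass-action law n * p = n_i**2), added for illustration:
# calling the function above with electron_conc=25, hole_conc=100, intrinsic_conc=0
# returns ("intrinsic_conc", 50.0), i.e. n_i = sqrt(25 * 100).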
| 701
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
        # clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
        # We don't actually care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
        # A prompt that contains no mask token is not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
        # If we use the most probable targets and filter differently, we should still
        # get the same results
_A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda _a : x["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return more
        # results than there are unique targets
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
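# Minimal usage sketch of the fill-mask pipeline exercised above (illustrative;
# the model name is just an example):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
#   unmasker(f"The capital of France is {unmasker.tokenizer.mask_token}.")
# The call returns a list of dicts with "sequence", "score", "token" and
# "token_str" keys, or a list of such lists when the prompt contains several
# mask tokens.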
| 54
| 0
|
from manim import *
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Tuple:
_A : Any = Rectangle(height=0.5 , width=0.5 )
_A : str = Rectangle(height=0.25 , width=0.25 )
_A : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A : Union[str, Any] = [mem.copy() for i in range(6 )]
_A : Tuple = [mem.copy() for i in range(6 )]
_A : Dict = VGroup(*_a ).arrange(_a , buff=0 )
_A : List[Any] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Optional[int] = VGroup(_a , _a ).arrange(_a , buff=0 )
_A : Dict = Text("""CPU""" , font_size=24 )
_A : int = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_A : List[Any] = [mem.copy() for i in range(4 )]
_A : List[Any] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Optional[Any] = Text("""GPU""" , font_size=24 )
_A : int = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_A : List[Any] = [mem.copy() for i in range(6 )]
_A : Optional[int] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Tuple = Text("""Model""" , font_size=24 )
_A : int = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_A : Optional[int] = []
_A : Tuple = []
_A : List[Any] = []
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_A : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_A : List[Any] = [mem.copy() for i in range(6 )]
_A : Any = VGroup(*_a ).arrange(_a , buff=0 )
_A : Any = Text("""Loaded Checkpoint""" , font_size=24 )
_A : str = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_A : int = []
_A : List[str] = []
for i, rect in enumerate(_a ):
_A : Optional[Any] = fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_A : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_A : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A : Optional[Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_A : Optional[Any] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_A : Optional[int] = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
_A : List[Any] = [meta_mem.copy() for i in range(6 )]
_A : List[str] = [meta_mem.copy() for i in range(6 )]
_A : Union[str, Any] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Tuple = VGroup(*_a ).arrange(_a , buff=0 )
_A : Any = VGroup(_a , _a ).arrange(_a , buff=0 )
_A : Tuple = Text("""Disk""" , font_size=24 )
_A : int = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_A : Optional[int] = []
for i, rect in enumerate(_a ):
_A : Optional[int] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_A : List[Any] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait()
| 702
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : List[str] = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def a__ ( self ) -> List[Any]:
torch.manual_seed(0 )
_A : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
return model
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
_A : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def a__ ( self ) -> int:
_A : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : List[str] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_A : List[str] = DDPMScheduler()
_A : Dict = AudioDiffusionPipeline(vqvae=_a , unet=self.dummy_unet , mel=_a , scheduler=_a )
_A : Optional[int] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[int] = torch.Generator(device=_a ).manual_seed(42 )
_A : Union[str, Any] = pipe(generator=_a , steps=4 )
_A : int = output.audios[0]
_A : Dict = output.images[0]
_A : Any = torch.Generator(device=_a ).manual_seed(42 )
_A : Tuple = pipe(generator=_a , steps=4 , return_dict=_a )
_A : List[Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_A : List[str] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_A : Union[str, Any] = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
_A : List[Any] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_A : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_A : Any = DDIMScheduler()
_A : Tuple = self.dummy_vqvae_and_unet
_A : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_a , scheduler=_a )
_A : List[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
np.random.seed(0 )
_A : Dict = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_A : int = torch.Generator(device=_a ).manual_seed(42 )
_A : int = pipe(raw_audio=_a , generator=_a , start_step=5 , steps=10 )
_A : Dict = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_A : Optional[Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_A : List[Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_A : List[Any] = self.dummy_unet_condition
_A : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_a , mel=_a , scheduler=_a )
_A : List[str] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
np.random.seed(0 )
_A : List[str] = torch.rand((1, 1, 10) )
_A : Tuple = pipe(generator=_a , encoding=_a )
_A : Tuple = output.images[0]
_A : Optional[int] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_A : int = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> Optional[int]:
_A : Tuple = torch_device
_A : Union[str, Any] = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
_A : Optional[int] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[str] = torch.Generator(device=_a ).manual_seed(42 )
_A : Dict = pipe(generator=_a )
_A : Optional[int] = output.audios[0]
_A : Optional[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_A : Union[str, Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_A : Any = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 703
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda snake_case_ : i.created_at,reverse=snake_case_ )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
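# Summary of the rules above (descriptive note): an open issue whose last comment
# came from the GitHub Actions bot, with no activity for more than 7 days and an
# age of at least 30 days, is closed; otherwise, an issue inactive for more than
# 23 days (and at least 30 days old) receives the stale reminder comment. Issues
# carrying any label in LABELS_TO_EXEMPT are skipped in both cases.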
| 54
| 0
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowercase ( UpperCamelCase__ ):
_a = "Wav2Vec2FeatureExtractor"
_a = "AutoTokenizer"
def __init__( self , _a , _a ) -> List[str]:
super().__init__(_a , _a )
_A : int = self.feature_extractor
_A : Optional[int] = False
@classmethod
def a__ ( cls , _a , **_a ) -> str:
try:
return super().from_pretrained(_a , **_a )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ , _a , )
_A : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_a , **_a )
_A : str = WavaVecaCTCTokenizer.from_pretrained(_a , **_a )
return cls(feature_extractor=_a , tokenizer=_a )
def __call__( self , *_a , **_a ) -> List[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
_A : Dict = kwargs.pop("""raw_speech""" )
else:
_A : int = kwargs.pop("""audio""" , _a )
_A : str = kwargs.pop("""sampling_rate""" , _a )
_A : Tuple = kwargs.pop("""text""" , _a )
if len(_a ) > 0:
_A : Dict = args[0]
_A : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
_A : Optional[Any] = self.feature_extractor(_a , *_a , sampling_rate=_a , **_a )
if text is not None:
_A : str = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_A : Dict = encodings["""input_ids"""]
return inputs
def a__ ( self , *_a , **_a ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_a , **_a )
_A : Optional[Any] = kwargs.pop("""input_features""" , _a )
_A : Dict = kwargs.pop("""labels""" , _a )
if len(_a ) > 0:
_A : Union[str, Any] = args[0]
_A : List[Any] = args[1:]
if input_features is not None:
_A : Dict = self.feature_extractor.pad(_a , *_a , **_a )
if labels is not None:
_A : List[Any] = self.tokenizer.pad(_a , **_a )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_A : List[str] = labels["""input_ids"""]
return input_features
def a__ ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def a__ ( self , *_a , **_a ) -> Any:
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def a__ ( self ) -> Optional[Any]:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
_A : List[str] = True
_A : Optional[int] = self.tokenizer
yield
_A : List[str] = self.feature_extractor
_A : str = False
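# Illustrative usage sketch (the processor and argument names below are examples,
# not taken from this file): a combined call such as
#   processor(audio=raw_speech, sampling_rate=16_000, text=transcript, return_tensors="pt")
# runs the feature extractor on the audio and the tokenizer on the text, and the
# tokenized text is attached to the returned feature dict as labels, mirroring
# the __call__ method above.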
| 704
|
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_a , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_a , _a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 0
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_snake_case = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase ( datasets.BuilderConfig ):
_a = 1_0_0_0_0
_a = None
_a = None
class lowercase ( datasets.ArrowBasedBuilder ):
_a = ParquetConfig
def a__ ( self ) -> Optional[int]:
return datasets.DatasetInfo(features=self.config.features )
def a__ ( self , _a ) -> Any:
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_A : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_A : Optional[Any] = data_files
if isinstance(_a , _a ):
_A : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_A : Optional[Any] = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_A : List[str] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_A : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_A : Dict = [dl_manager.iter_files(_a ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_a ):
with open(_a , """rb""" ) as f:
_A : int = datasets.Features.from_arrow_schema(pq.read_schema(_a ) )
break
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={"""files""": files} ) )
return splits
def a__ ( self , _a ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_A : List[Any] = table_cast(_a , self.info.features.arrow_schema )
return pa_table
def a__ ( self , _a ) -> Tuple:
_A : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , """rb""" ) as f:
_A : Union[str, Any] = pq.ParquetFile(_a )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_A : Optional[int] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(_a )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(_a )}: {e}''' )
raise
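# Illustrative usage of this builder through the datasets API (the file name is a
# placeholder):
#   import datasets
#   ds = datasets.load_dataset("parquet", data_files={"train": "data/train.parquet"})
# Batches are read with pyarrow.parquet and cast to the configured features, as in
# the table-generation method above.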
| 705
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 54
| 0
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 706
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( snake_case_ ):
return data[1:] + data[0]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = int("""0b""" + data[0] + data[-1],2 )
_A : Any = int("""0b""" + data[1:3],2 )
return bin(s[row][col] )[2:]
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[str] = message[:4]
_A : List[Any] = message[4:]
_A : Union[str, Any] = apply_table(snake_case_,snake_case_ )
_A : List[Any] = xor(snake_case_,snake_case_ )
_A : Optional[Any] = apply_sbox(snake_case_,temp[:4] ) # noqa: E741
_A : List[Any] = apply_sbox(snake_case_,temp[4:] )
_A : int = """0""" * (2 - len(snake_case_ )) + l # noqa: E741
_A : Union[str, Any] = """0""" * (2 - len(snake_case_ )) + r
_A : List[Any] = apply_table(l + r,snake_case_ )
_A : Any = xor(snake_case_,snake_case_ )
return temp + right
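# Descriptive note: the `function` above is one Feistel round of simplified DES
# (S-DES). The right half of the message is expanded and permuted, XOR-ed with the
# round key, passed through the two S-boxes, permuted with the 4-bit table and
# finally XOR-ed into the left half; the halves are swapped between rounds in the
# main block below.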
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
| 0
|
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime, False otherwise."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 707
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 54
| 0
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid, c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
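# A hedged illustrative call: with a bulk modulus of about 2.15e9 Pa and a density of
# 1000 kg/m^3 (rough values for water), the formula gives roughly 1.5e3 m/s. Both material
# constants are order-of-magnitude assumptions, not authoritative data.
if __name__ == "__main__":
    print(f"Approximate speed of sound in water: {speed_of_sound_in_a_fluid(1000, 2.15e9):.0f} m/s")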
| 708
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a=None , _a=None , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A : Optional[Any] = self.model.config
else:
_A : int = config
_A : Optional[Any] = data_args
_A : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : Union[str, Any] = label_smoothed_nll_loss
def a__ ( self , _a ) -> int:
if self.optimizer is None:
_A : List[str] = ["""bias""", """LayerNorm.weight"""]
_A : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_A : Optional[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_A : Dict = Adafactor
_A : int = {"""scale_parameter""": False, """relative_step""": False}
else:
_A : int = AdamW
_A : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_A : List[str] = self.args.learning_rate
if self.sharded_ddp:
_A : List[str] = OSS(
params=_a , optim=_a , **_a , )
else:
_A : Tuple = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_A : Union[str, Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a__ ( self , _a ) -> Dict:
_A : List[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
# If PAD token is not defined at least EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
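# A hedged, self-contained restatement of the optimizer-grouping pattern used by the trainer
# above: parameters whose names contain "bias" or "LayerNorm.weight" are excluded from weight
# decay. The tiny module, the weight-decay value and torch.optim.AdamW (standing in for the
# trainer's AdamW/Adafactor choice) are illustrative assumptions.
if __name__ == "__main__":

    class _TinyBlock(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.dense = nn.Linear(8, 8)
            self.LayerNorm = nn.LayerNorm(8)

    tiny = _TinyBlock()
    no_decay = ["bias", "LayerNorm.weight"]
    grouped_parameters = [
        {
            "params": [p for n, p in tiny.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": 0.01,
        },
        {
            "params": [p for n, p in tiny.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = torch.optim.AdamW(grouped_parameters, lr=3e-4)
    print(optimizer)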
| 54
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
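# A brief, hedged worked example of the sizing helper above; the pixel sizes are arbitrary.
# It rounds each dimension up to a multiple of scale_factor**2 and returns the corresponding
# latent-space size (pixels / scale_factor) that the UNet operates on.
assert downscale_height_and_width(768, 768, scale_factor=8) == (96, 96)
assert downscale_height_and_width(500, 300, scale_factor=8) == (64, 40)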
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , ) -> Optional[int]:
super().__init__()
self.register_modules(
unet=_a , scheduler=_a , movq=_a , )
_A : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self , _a , _a , _a , _a , _a , _a ) -> Dict:
if latents is None:
_A : Any = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A : List[Any] = latents.to(_a )
_A : Any = latents * scheduler.init_noise_sigma
return latents
def a__ ( self , _a=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_A : Any = torch.device(F'''cuda:{gpu_id}''' )
_A : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
def a__ ( self , _a=0 ) -> Any:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_A : List[str] = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=_a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A : List[str] = cpu_offload_with_hook(_a , _a , prev_module_hook=_a )
# We'll offload the last model manually.
_A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self ) -> Optional[Any]:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 100 , _a = 4.0 , _a = 1 , _a = None , _a = None , _a = "pil" , _a = True , ) -> Optional[int]:
_A : Optional[int] = self._execution_device
_A : Any = guidance_scale > 1.0
if isinstance(_a , _a ):
_A : Union[str, Any] = torch.cat(_a , dim=0 )
if isinstance(_a , _a ):
_A : Any = torch.cat(_a , dim=0 )
if isinstance(_a , _a ):
_A : List[str] = torch.cat(_a , dim=0 )
_A : List[str] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_A : Any = image_embeds.repeat_interleave(_a , dim=0 )
_A : Dict = negative_image_embeds.repeat_interleave(_a , dim=0 )
_A : Union[str, Any] = hint.repeat_interleave(_a , dim=0 )
_A : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_a )
_A : Union[str, Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_a )
self.scheduler.set_timesteps(_a , device=_a )
_A : int = self.scheduler.timesteps
_A : str = self.movq.config.latent_channels
_A : int = downscale_height_and_width(_a , _a , self.movq_scale_factor )
# create initial latent
_A : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_A : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A : List[str] = {"""image_embeds""": image_embeds, """hint""": hint}
_A : Dict = self.unet(
sample=_a , timestep=_a , encoder_hidden_states=_a , added_cond_kwargs=_a , return_dict=_a , )[0]
if do_classifier_free_guidance:
_A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
_A : Dict = noise_pred.chunk(2 )
_A : Optional[int] = variance_pred.chunk(2 )
_A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(
_a , _a , _a , generator=_a , )[0]
# post-processing
_A : str = self.movq.decode(_a , force_not_quantize=_a )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_A : Optional[Any] = image * 0.5 + 0.5
_A : Union[str, Any] = image.clamp(0 , 1 )
_A : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A : int = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 709
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling random points in a square and counting hits inside the unit circle."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area of the circle to the area of the square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of function_to_integrate over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
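# A hedged demo run of the three estimators above; the sample count is an arbitrary
# illustrative choice, and the printed estimates differ from run to run because the
# estimators are stochastic.
if __name__ == "__main__":
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)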
| 54
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
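# A brief, hedged usage sketch of the linked-list stack above; the pushed values are
# arbitrary and only illustrate the LIFO behaviour of push/pop/peek.
if __name__ == "__main__":
    stack: Stack[int] = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    assert len(stack) == 3 and str(stack) == "3->2->1"
    assert stack.peek() == 3
    assert stack.pop() == 3
    assert not stack.is_empty()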
| 710
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
| 54
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class lowercase ( UpperCamelCase__ ):
_a = "altclip_text_model"
def __init__( self , _a=25_0002 , _a=1024 , _a=24 , _a=16 , _a=4096 , _a="gelu" , _a=0.1 , _a=0.1 , _a=514 , _a=1 , _a=0.02 , _a=0.02 , _a=1e-05 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=768 , **_a , ) -> Optional[Any]:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : Optional[int] = vocab_size
_A : List[str] = hidden_size
_A : Optional[int] = num_hidden_layers
_A : str = num_attention_heads
_A : Dict = hidden_act
_A : Optional[Any] = intermediate_size
_A : Dict = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : str = max_position_embeddings
_A : Optional[int] = type_vocab_size
_A : Optional[int] = initializer_range
_A : Any = initializer_factor
_A : List[Any] = layer_norm_eps
_A : Dict = position_embedding_type
_A : int = use_cache
_A : Dict = project_dim
class lowercase ( UpperCamelCase__ ):
_a = "altclip_vision_model"
def __init__( self , _a=768 , _a=3072 , _a=512 , _a=12 , _a=12 , _a=3 , _a=224 , _a=32 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , **_a , ) -> Any:
super().__init__(**_a )
_A : Tuple = hidden_size
_A : Optional[int] = intermediate_size
_A : int = projection_dim
_A : Tuple = num_hidden_layers
_A : str = num_attention_heads
_A : Tuple = num_channels
_A : Union[str, Any] = patch_size
_A : List[str] = image_size
_A : Tuple = initializer_range
_A : Optional[int] = initializer_factor
_A : str = attention_dropout
_A : Dict = layer_norm_eps
_A : Dict = hidden_act
@classmethod
def a__ ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
_A : int = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
_A : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class lowercase ( UpperCamelCase__ ):
_a = "altclip"
_a = True
def __init__( self , _a=None , _a=None , _a=768 , _a=2.6592 , **_a ) -> int:
# If `_config_dict` exists, we use it for backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
_A : Union[str, Any] = kwargs.pop("""text_config_dict""" , _a )
_A : Optional[int] = kwargs.pop("""vision_config_dict""" , _a )
super().__init__(**_a )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_A : Optional[int] = {}
# This is the complete result when using `text_config_dict`.
_A : str = AltCLIPTextConfig(**_a ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_A : Dict = (
F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
F'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_A : List[str] = (
F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
F'''value `text_config["{key}"]` will be overridden.'''
)
logger.warning(_a )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
_A : Dict = {}
# This is the complete result when using `vision_config_dict`.
_A : Union[str, Any] = AltCLIPVisionConfig(**_a ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_A : List[Any] = {
str(_a ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_A : int = (
F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
F'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_A : List[Any] = (
F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
F'''The value `vision_config["{key}"]` will be overridden.'''
)
logger.warning(_a )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
_A : Any = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
_A : List[str] = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
_A : Tuple = AltCLIPTextConfig(**_a )
_A : List[Any] = AltCLIPVisionConfig(**_a )
_A : Optional[Any] = projection_dim
_A : Tuple = logit_scale_init_value
_A : Dict = 1.0
@classmethod
def a__ ( cls , _a , _a , **_a ) -> Tuple:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a )
def a__ ( self ) -> Tuple:
_A : List[Any] = copy.deepcopy(self.__dict__ )
_A : List[Any] = self.text_config.to_dict()
_A : Dict = self.vision_config.to_dict()
_A : List[Any] = self.__class__.model_type
return output
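# A hedged usage sketch against the released transformers API that the classes above mirror;
# the checkpoint-free construction and the overridden hyperparameters are illustrative
# assumptions, not recommended values.
if __name__ == "__main__":
    from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

    text_config = AltCLIPTextConfig(hidden_size=1024, num_hidden_layers=24)
    vision_config = AltCLIPVisionConfig(image_size=224, patch_size=32)
    config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=768)
    print(config.projection_dim, config.text_config.hidden_size, config.vision_config.image_size)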
| 711
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
self.assertTrue(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
| 54
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Tuple:
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=_a , speech_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , )
def a__ ( self , _a = "auto" ) -> List[Any]:
if slice_size == "auto":
_A : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def a__ ( self ) -> Tuple:
self.enable_attention_slicing(_a )
@torch.no_grad()
def __call__( self , _a , _a=1_6000 , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ) -> int:
_A : Dict = self.speech_processor.feature_extractor(
_a , return_tensors="""pt""" , sampling_rate=_a ).input_features.to(self.device )
_A : Optional[int] = self.speech_model.generate(_a , max_length=48_0000 )
_A : Union[str, Any] = self.speech_processor.tokenizer.batch_decode(_a , skip_special_tokens=_a , normalize=_a )[
0
]
if isinstance(_a , _a ):
_A : Any = 1
elif isinstance(_a , _a ):
_A : Optional[int] = len(_a )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_a )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_a , _a ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(_a )}.''' )
# get prompt text embeddings
_A : List[Any] = self.tokenizer(
_a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
_A : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_A : List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_A : str = text_input_ids[:, : self.tokenizer.model_max_length]
_A : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_A : List[Any] = text_embeddings.shape
_A : Tuple = text_embeddings.repeat(1 , _a , 1 )
_A : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_A : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_A : List[str]
if negative_prompt is None:
_A : List[Any] = [""""""] * batch_size
elif type(_a ) is not type(_a ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(_a )} !='''
F''' {type(_a )}.''' )
elif isinstance(_a , _a ):
_A : str = [negative_prompt]
elif batch_size != len(_a ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(_a )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
_A : Optional[int] = negative_prompt
_A : Tuple = text_input_ids.shape[-1]
_A : Dict = self.tokenizer(
_a , padding="""max_length""" , max_length=_a , truncation=_a , return_tensors="""pt""" , )
_A : int = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_A : Optional[Any] = uncond_embeddings.shape[1]
_A : Dict = uncond_embeddings.repeat(1 , _a , 1 )
_A : str = uncond_embeddings.view(batch_size * num_images_per_prompt , _a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_A : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_A : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_A : List[str] = torch.randn(_a , generator=_a , device="""cpu""" , dtype=_a ).to(
self.device )
else:
_A : Dict = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_A : Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_A : int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_A : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : List[str] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_A : Optional[int] = {}
if accepts_eta:
_A : Dict = eta
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A : Dict = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
_A : Optional[Any] = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform guidance
if do_classifier_free_guidance:
_A : Dict = noise_pred.chunk(2 )
_A : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_A : str = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_a , _a , _a )
_A : List[str] = 1 / 0.18215 * latents
_A : List[str] = self.vae.decode(_a ).sample
_A : int = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_A : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A : Dict = self.numpy_to_pil(_a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
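# A hedged, self-contained illustration of the classifier-free guidance update used in the
# denoising loop above; the tensors and guidance_scale=7.5 are toy values, not defaults drawn
# from this pipeline.
if __name__ == "__main__":
    import torch

    noise_pred_uncond = torch.zeros(1, 4, 8, 8)
    noise_pred_text = torch.ones(1, 4, 8, 8)
    guidance_scale = 7.5
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    assert torch.allclose(noise_pred, torch.full((1, 4, 8, 8), 7.5))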
| 712
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
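# regression test: an optimizer wrapped by accelerator.prepare() must remain picklable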
@require_cpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Optional[Any]:
_A : List[str] = torch.nn.Linear(10 , 10 )
_A : int = torch.optim.SGD(model.parameters() , 0.1 )
_A : str = Accelerator()
_A : List[Any] = accelerator.prepare(_a )
try:
pickle.loads(pickle.dumps(_a ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import random
from .binary_exp_mod import bin_exp_mod
def lowerCAmelCase_ ( snake_case_,snake_case_=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_A : List[str] = n - 1
_A : Dict = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
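    # Miller-Rabin: run `prec` random rounds; each round draws a base a in [2, n - 1], computes
    # b = a^d mod n and squares it up to `exp` times looking for n - 1; if it is never found, n is composite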
_A : int = 0
while count < prec:
_A : Any = random.randint(2,n - 1 )
_A : Any = bin_exp_mod(snake_case_,snake_case_,snake_case_ )
if b != 1:
_A : List[Any] = True
for _ in range(snake_case_ ):
if b == n - 1:
_A : List[str] = False
break
_A : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_snake_case = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
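# fast pipeline tests below use tiny dummy UNet/VQ models; a slow, GPU-only integration test
# against the released kandinsky-2-2 checkpoints follows at the end of the file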
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowercase ( UpperCamelCase__ ):
_a = "mctct"
def __init__( self , _a=8065 , _a=1536 , _a=36 , _a=6144 , _a=4 , _a=384 , _a=920 , _a=1e-5 , _a=0.3 , _a="relu" , _a=0.02 , _a=0.3 , _a=0.3 , _a=1 , _a=0 , _a=2 , _a=1 , _a=0.3 , _a=1 , _a=(7,) , _a=(3,) , _a=80 , _a=1 , _a=None , _a="sum" , _a=False , **_a , ) -> List[str]:
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
_A : Dict = vocab_size
_A : Tuple = hidden_size
_A : Optional[int] = num_hidden_layers
_A : List[str] = intermediate_size
_A : int = num_attention_heads
_A : Union[str, Any] = attention_head_dim
_A : Tuple = max_position_embeddings
_A : int = layer_norm_eps
_A : int = layerdrop
_A : Optional[int] = hidden_act
_A : List[Any] = initializer_range
_A : Optional[int] = hidden_dropout_prob
_A : Union[str, Any] = attention_probs_dropout_prob
_A : str = pad_token_id
_A : int = bos_token_id
_A : int = eos_token_id
_A : List[str] = conv_glu_dim
_A : Optional[int] = conv_dropout
_A : Any = num_conv_layers
_A : Optional[Any] = input_feat_per_channel
_A : List[Any] = input_channels
_A : List[str] = conv_channels
_A : int = ctc_loss_reduction
_A : Optional[Any] = ctc_zero_infinity
        # prevents the config test from failing when exporting to json
_A : str = list(_a )
_A : Union[str, Any] = list(_a )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
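    # appears to implement Project Euler problem 135: count how many n below the limit have
    # exactly ten solutions of x**2 - y**2 - z**2 == n with x, y, z in arithmetic progression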
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, and also 4d < a
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
import argparse
import os
import re
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_snake_case : List[Any] = "src/diffusers"
# Matches is_xxx_available()
_snake_case : List[Any] = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_snake_case : List[str] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
_snake_case : Optional[int] = "\n{0} = None\n"
_snake_case : str = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
_snake_case : Dict = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = _re_backend.findall(snake_case_ )
if len(snake_case_ ) == 0:
return None
return "_and_".join(snake_case_ )
def lowerCAmelCase_ ( ):
with open(os.path.join(snake_case_,"""__init__.py""" ),"""r""",encoding="""utf-8""",newline="""\n""" ) as f:
_A : Optional[int] = f.readlines()
    # Get to the point where we do the actual imports for type checking
_A : Any = 0
_A : Optional[int] = {}
# Go through the end of the file
while line_index < len(snake_case_ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
_A : str = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
_A : Any = []
# Until we unindent, add backend objects to the list
while line_index < len(snake_case_ ) and len(lines[line_index] ) > 1:
_A : Optional[Any] = lines[line_index]
_A : Tuple = _re_single_line_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(snake_case_ ) > 0:
_A : Any = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if name.isupper():
return DUMMY_CONSTANT.format(snake_case_ )
elif name.islower():
return DUMMY_FUNCTION.format(snake_case_,snake_case_ )
else:
return DUMMY_CLASS.format(snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_=None ):
if backend_specific_objects is None:
_A : Dict = read_init()
    # Special correspondence from backend to module name, as used in the function requires_modulename
_A : List[Any] = {}
for backend, objects in backend_specific_objects.items():
_A : int = """[""" + """, """.join(f'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
_A : str = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(snake_case_,snake_case_ ) for o in objects] )
_A : List[Any] = dummy_file
return dummy_files
def lowerCAmelCase_ ( snake_case_=False ):
_A : Dict = create_dummy_files()
    # Special correspondence from backend to shortcut, as used in utils/dummy_xxx_objects.py
_A : Optional[int] = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
_A : Optional[int] = os.path.join(snake_case_,"""utils""" )
_A : str = {
backend: os.path.join(snake_case_,f'''dummy_{short_names.get(snake_case_,snake_case_ )}_objects.py''' )
for backend in dummy_files.keys()
}
_A : List[str] = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(snake_case_ ):
with open(snake_case_,"""r""",encoding="""utf-8""",newline="""\n""" ) as f:
_A : Optional[Any] = f.read()
else:
_A : int = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'''Updating diffusers.utils.dummy_{short_names.get(snake_case_,snake_case_ )}_objects.py as the main '''
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend],"""w""",encoding="""utf-8""",newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
f'''diffusers.utils.dummy_{short_names.get(snake_case_,snake_case_ )}_objects.py. Run `make fix-copies` '''
"""to fix this.""" )
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_snake_case : Any = parser.parse_args()
check_dummies(args.fix_and_overwrite)
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
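# in-graph GPT-2 tokenizer: a Keras layer wrapping keras_nlp's BytePairTokenizer so that
# tokenization (and optional padding up to max_length) runs inside the TensorFlow graph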
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> Dict:
_A : Any = parent
_A : Any = batch_size
_A : List[str] = seq_length
_A : List[Any] = is_training
_A : List[str] = use_attention_mask
_A : Dict = use_token_type_ids
_A : str = use_labels
_A : Optional[int] = vocab_size
_A : Union[str, Any] = hidden_size
_A : List[str] = num_hidden_layers
_A : int = num_attention_heads
_A : List[Any] = intermediate_size
_A : Optional[Any] = hidden_act
_A : List[str] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : List[Any] = max_position_embeddings
_A : Tuple = type_vocab_size
_A : str = type_sequence_label_size
_A : Optional[int] = initializer_range
_A : Optional[int] = num_choices
def a__ ( self ) -> Dict:
_A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : str = None
if self.use_attention_mask:
_A : int = random_attention_mask([self.batch_size, self.seq_length] )
_A : Optional[int] = None
if self.use_token_type_ids:
_A : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Dict = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = self.prepare_config_and_inputs()
_A : List[Any] = config_and_inputs
_A : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def a__ ( self ) -> str:
_A : Optional[Any] = self.prepare_config_and_inputs()
_A : Dict = config_and_inputs
_A : int = True
_A : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_A : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = True
_a = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> Optional[int]:
_A : int = FlaxRobertaModelTester(self )
@slow
def a__ ( self ) -> Dict:
for model_class_name in self.all_model_classes:
_A : Union[str, Any] = model_class_name.from_pretrained("""roberta-base""" , from_pt=_a )
_A : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
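# tokenizer tests for Pegasus and BigBird-Pegasus: slow/fast parity, mask-token handling,
# truncation/padding shapes, and a full integration check of expected token ids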
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return abs(snake_case_ ) if a == 0 else greatest_common_divisor(b % a,snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    while y: # when y becomes 0 the loop terminates and x is returned as the final GCD
_A : Dict = y, x % y
return abs(snake_case_ )
def lowerCAmelCase_ ( ):
try:
_A : Tuple = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
_A : Any = int(nums[0] )
_A : int = int(nums[1] )
print(
f'''greatest_common_divisor({num_a}, {num_a}) = '''
f'''{greatest_common_divisor(snake_case_,snake_case_ )}''' )
print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(snake_case_,snake_case_ )}''' )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ... import PretrainedConfig
_snake_case = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
_a = "nezha"
def __init__( self , _a=2_1128 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=64 , _a=2 , _a=0.02 , _a=1e-12 , _a=0.1 , _a=0 , _a=2 , _a=3 , _a=True , **_a , ) -> str:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : str = vocab_size
_A : Optional[int] = hidden_size
_A : str = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Any = hidden_act
_A : Tuple = intermediate_size
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : int = max_position_embeddings
_A : List[str] = max_relative_position
_A : str = type_vocab_size
_A : Any = initializer_range
_A : Union[str, Any] = layer_norm_eps
_A : Optional[int] = classifier_dropout
_A : Any = use_cache
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
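    # extended Euclid: returns (x, y) with a*x + b*y == gcd(a, b);
    # for example (a, b) = (10, 6) yields (-1, 2) since 10*(-1) + 6*2 == 2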
if b == 0:
return (1, 0)
(_A) : Optional[Any] = extended_euclid(snake_case_,a % b )
_A : Optional[Any] = a // b
return (y, x - k * y)
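# Chinese Remainder Theorem: for coprime moduli, find the unique n modulo their product
# that leaves the given remainders; e.g. the residues 1 mod 5 and 2 mod 7 combine to 16 mod 35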
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
(_A) : Tuple = extended_euclid(snake_case_,snake_case_ )
_A : str = na * na
_A : Tuple = ra * x * na + ra * y * na
return (n % m + m) % m
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
(_A) : Tuple = extended_euclid(snake_case_,snake_case_ )
if b < 0:
_A : List[str] = (b % n + n) % n
return b
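# second CRT variant: the same combination, but built from modular inverses (invert_modulo)
# of each modulus instead of the raw extended-euclid coefficients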
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[str] = invert_modulo(snake_case_,snake_case_ ), invert_modulo(snake_case_,snake_case_ )
_A : Union[str, Any] = na * na
_A : Optional[int] = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
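# Quine-McCluskey minimization: the helpers below merge minterms that differ in a single bit
# to find prime implicants, build the prime implicant chart, and select the essential ones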
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = list(snake_case_ )
_A : List[Any] = list(snake_case_ )
_A : Tuple = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
while True:
_A : int = ["""$"""] * len(snake_case_ )
_A : Any = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1,len(snake_case_ ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
_A : Dict = list(set(snake_case_ ) )
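# convert each decimal minterm into a fixed-width binary string, one character per variable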
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = []
for minterm in minterms:
_A : Tuple = """"""
for _ in range(snake_case_ ):
_A : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = list(snake_case_ )
_A : Tuple = list(snake_case_ )
_A : Dict = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
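# essential prime implicant selection: first take implicants that are the only cover of some
# minterm, then greedily pick the implicant covering the most remaining minterms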
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = []
_A : str = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
for i in range(len(snake_case_ ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
_A : Optional[int] = 0
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i],binary[j],snake_case_ ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
float(snake_case_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_A : int = decimal_to_binary(snake_case_,snake_case_ )
_A : Optional[Any] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
_A : int = prime_implicant_chart(snake_case_,snake_case_ )
_A : int = selection(snake_case_,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_snake_case = None
try:
import msvcrt
except ImportError:
_snake_case = None
try:
import fcntl
except ImportError:
_snake_case = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
_snake_case = OSError
# Data
# ------------------------------------------------
_snake_case = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
_snake_case = "3.0.12"
_snake_case = None
def lowerCAmelCase_ ( ):
global _logger
_A : str = _logger or logging.getLogger(__name__ )
return _logger
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> List[str]:
_A : int = lock_file
return None
def __str__( self ) -> str:
_A : List[Any] = F'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class lowercase :
def __init__( self , _a ) -> Tuple:
_A : Optional[int] = lock
return None
def __enter__( self ) -> List[Any]:
return self.lock
def __exit__( self , _a , _a , _a ) -> List[Any]:
self.lock.release()
return None
class lowercase :
def __init__( self , _a , _a=-1 , _a=None ) -> List[Any]:
_A : List[Any] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
_A : int = self.hash_filename_if_too_long(_a , _a )
# The path to the lock file.
_A : Optional[int] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_A : Optional[int] = None
# The default timeout value.
_A : Union[str, Any] = timeout
# We use this lock primarily for the lock counter.
_A : str = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_A : str = 0
return None
@property
def a__ ( self ) -> Dict:
return self._lock_file
@property
def a__ ( self ) -> Optional[Any]:
return self._timeout
@timeout.setter
def a__ ( self , _a ) -> Optional[int]:
_A : Dict = float(_a )
return None
def a__ ( self ) -> Optional[Any]:
raise NotImplementedError()
def a__ ( self ) -> int:
raise NotImplementedError()
@property
def a__ ( self ) -> Optional[Any]:
return self._lock_file_fd is not None
def a__ ( self , _a=None , _a=0.05 ) -> Dict:
# Use the default timeout, if no timeout is provided.
if timeout is None:
_A : Tuple = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_A : Optional[int] = id(self )
_A : str = self._lock_file
_A : Tuple = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(_a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_A : List[Any] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def a__ ( self , _a=False ) -> Optional[int]:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_A : Optional[Any] = id(self )
_A : str = self._lock_file
logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
_A : Tuple = 0
logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self ) -> Any:
self.acquire()
return self
def __exit__( self , _a , _a , _a ) -> int:
self.release()
return None
def __del__( self ) -> List[Any]:
self.release(force=_a )
return None
def a__ ( self , _a , _a ) -> str:
_A : Optional[Any] = os.path.basename(_a )
if len(_a ) > max_length and max_length > 0:
_A : Dict = os.path.dirname(_a )
_A : Any = str(hash(_a ) )
_A : Tuple = filename[: max_length - len(_a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(_a , _a )
else:
return path
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a=-1 , _a=None ) -> Union[str, Any]:
from .file_utils import relative_to_absolute_path
super().__init__(_a , timeout=_a , max_filename_length=_a )
_A : Optional[Any] = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_A : Optional[Any] = os.open(self._lock_file , _a )
except OSError:
pass
else:
try:
msvcrt.locking(_a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(_a )
else:
_A : str = fd
return None
def a__ ( self ) -> int:
_A : str = self._lock_file_fd
_A : Any = None
msvcrt.locking(_a , msvcrt.LK_UNLCK , 1 )
os.close(_a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a=-1 , _a=None ) -> Optional[int]:
_A : List[Any] = os.statvfs(os.path.dirname(_a ) ).f_namemax
super().__init__(_a , timeout=_a , max_filename_length=_a )
def a__ ( self ) -> Any:
_A : Any = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_A : Any = os.open(self._lock_file , _a )
try:
fcntl.flock(_a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(_a )
else:
_A : Dict = fd
return None
def a__ ( self ) -> Dict:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_A : Any = self._lock_file_fd
_A : Dict = None
fcntl.flock(_a , fcntl.LOCK_UN )
os.close(_a )
return None
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Tuple:
_A : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_A : Optional[Any] = os.open(self._lock_file , _a )
except OSError:
pass
else:
_A : str = fd
return None
def a__ ( self ) -> List[Any]:
os.close(self._lock_file_fd )
_A : Dict = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
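# pick the platform-appropriate lock implementation: msvcrt-based locking on Windows,
# fcntl-based locking on Unix, and the soft create/delete-a-file fallback otherwise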
_snake_case = None
if msvcrt:
_snake_case = WindowsFileLock
elif fcntl:
_snake_case = UnixFileLock
else:
_snake_case = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
from __future__ import annotations
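# all_construct: dynamic-programming table where table[i] holds every list of words from
# word_bank that concatenates to target[:i]; the answer is table[len(target)]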
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(snake_case_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
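# walk the TF2 checkpoint variables, map each layer_with_weights-N/... name onto the matching
# attribute of a PyTorch BertModel, and copy the tensor over (transposing kernels where needed)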
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = os.path.abspath(snake_case_ )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
_A : List[str] = tf.train.list_variables(snake_case_ )
_A : List[Any] = []
_A : Optional[Any] = []
_A : Union[str, Any] = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_A : Tuple = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
_A : int = name[1:]
# figure out how many levels deep the name is
_A : Any = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(snake_case_ )
# read data
_A : List[str] = tf.train.load_variable(snake_case_,snake_case_ )
names.append("""/""".join(snake_case_ ) )
arrays.append(snake_case_ )
logger.info(f'''Read a total of {len(snake_case_ ):,} layers''' )
# Sanity check
if len(set(snake_case_ ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(snake_case_ ) )})''' )
_A : List[str] = list(set(snake_case_ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(snake_case_,snake_case_ ):
_A : Optional[Any] = full_name.split("""/""" )
_A : Any = model
_A : Any = []
for i, m_name in enumerate(snake_case_ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
_A : Optional[Any] = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
_A : int = getattr(snake_case_,"""embeddings""" )
_A : Dict = getattr(snake_case_,"""LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
_A : Any = getattr(snake_case_,"""encoder""" )
_A : Tuple = getattr(snake_case_,"""layer""" )
_A : str = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
_A : str = getattr(snake_case_,"""pooler""" )
_A : Union[str, Any] = getattr(snake_case_,"""dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
_A : List[str] = getattr(snake_case_,"""embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
_A : Optional[Any] = getattr(snake_case_,"""word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
_A : Optional[Any] = getattr(snake_case_,"""position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
_A : Optional[Any] = getattr(snake_case_,"""token_type_embeddings""" )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
_A : Any = getattr(snake_case_,"""weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
_A : Optional[int] = getattr(snake_case_,"""attention""" )
_A : List[Any] = getattr(snake_case_,"""self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
_A : Optional[int] = getattr(snake_case_,"""attention""" )
_A : Tuple = getattr(snake_case_,"""output""" )
_A : int = getattr(snake_case_,"""LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
_A : Optional[Any] = getattr(snake_case_,"""attention""" )
_A : Any = getattr(snake_case_,"""output""" )
_A : Optional[int] = getattr(snake_case_,"""dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
_A : Any = getattr(snake_case_,"""output""" )
_A : Union[str, Any] = getattr(snake_case_,"""dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
_A : str = getattr(snake_case_,"""output""" )
_A : Union[str, Any] = getattr(snake_case_,"""LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
_A : Optional[int] = getattr(snake_case_,"""key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
_A : Optional[int] = getattr(snake_case_,"""query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
_A : Optional[Any] = getattr(snake_case_,"""value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
_A : Dict = getattr(snake_case_,"""intermediate""" )
_A : Union[str, Any] = getattr(snake_case_,"""dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
_A : int = getattr(snake_case_,"""output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
_A : List[str] = getattr(snake_case_,"""bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
_A : Optional[Any] = getattr(snake_case_,"""weight""" )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
_A : List[str] = """.""".join(snake_case_ )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""",snake_case_ ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""",snake_case_ ):
_A : str = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_A : Tuple = array.transpose()
if pointer.shape == array.shape:
_A : Union[str, Any] = torch.from_numpy(snake_case_ )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
# Instantiate model
logger.info(f'''Loading model based on config from {config_path}...''' )
_A : Tuple = BertConfig.from_json_file(snake_case_ )
_A : Union[str, Any] = BertModel(snake_case_ )
# Load weights from checkpoint
logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(snake_case_,snake_case_,snake_case_ )
# Save pytorch-model
logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict(),snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
_snake_case = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
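# Example invocation (illustrative script and file names only, not taken from the original source):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_ckpt/bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin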
| 700
|
import operator
def lowerCAmelCase_ ( snake_case_,snake_case_ = False,snake_case_ = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
for i, item in enumerate(snake_case_ ):
if _operator(snake_case_,sublist[-1] ):
sublist.append(snake_case_ )
arr.pop(snake_case_ )
# merging sublist into solution list
if not solution:
solution.extend(snake_case_ )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(snake_case_ ):
if not _operator(snake_case_,snake_case_ ):
solution.insert(snake_case_,snake_case_ )
break
else:
solution.append(snake_case_ )
strand_sort(snake_case_,snake_case_,snake_case_ )
return solution
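# Illustrative trace of strand_sort([4, 3, 5, 1, 2]) (added for clarity, not part of the original):
# the first pass pulls the increasing "strand" [4, 5] out of the input, leaving [3, 1, 2];
# that strand seeds the solution list, the later strands [3] and then [1, 2] are merged into it
# in order, and the recursion stops once the input list is empty, yielding [1, 2, 3, 4, 5].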
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 54
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# Inputs without a mask_token are not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda _a : _a["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
# The target list contains duplicates, so we can't output more
# unique results than there are unique targets
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
| 54
| 0
|
from math import ceil
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = list(range(0,snake_case_ ) )
_A : Optional[int] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_A : Optional[int] = []
for i in device_map_blocks:
if device_map_blocks.count(snake_case_ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(snake_case_ )
# Missing blocks
_A : List[str] = [i for i in blocks if i not in device_map_blocks]
_A : Optional[Any] = [i for i in device_map_blocks if i not in blocks]
if len(snake_case_ ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(snake_case_ ) )
if len(snake_case_ ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(snake_case_ ) )
if len(snake_case_ ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = list(range(snake_case_ ) )
_A : Tuple = int(ceil(n_layers / len(snake_case_ ) ) )
_A : int = [layers[i : i + n_blocks] for i in range(0,snake_case_,snake_case_ )]
return dict(zip(snake_case_,snake_case_ ) )
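# Illustrative example of the layer-splitting helper above (it appears to mirror transformers'
# get_device_map): with n_layers=6 and devices=[0, 1] it returns {0: [0, 1, 2], 1: [3, 4, 5]}.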
| 702
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_snake_case = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda snake_case_ : snake_case_.created_at,reverse=True )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 54
| 0
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> int:
torch.manual_seed(0 )
_A : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a__ ( self ) -> Dict:
_A : List[str] = self.dummy_uncond_unet
_A : Tuple = KarrasVeScheduler()
_A : Any = KarrasVePipeline(unet=_a , scheduler=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Dict = torch.manual_seed(0 )
_A : Optional[Any] = pipe(num_inference_steps=2 , generator=_a , output_type="""numpy""" ).images
_A : Optional[Any] = torch.manual_seed(0 )
_A : Tuple = pipe(num_inference_steps=2 , generator=_a , output_type="""numpy""" , return_dict=_a )[0]
_A : Tuple = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Tuple:
_A : int = """google/ncsnpp-celebahq-256"""
_A : str = UNetaDModel.from_pretrained(_a )
_A : int = KarrasVeScheduler()
_A : str = KarrasVePipeline(unet=_a , scheduler=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : int = torch.manual_seed(0 )
_A : Tuple = pipe(num_inference_steps=20 , generator=_a , output_type="""numpy""" ).images
_A : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_A : Optional[Any] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 704
|
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_a , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_a , _a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 0
|
def lowerCAmelCase_ ( snake_case_ ):
_A : str = set()
# edges = list of graph's edges
_A : Optional[Any] = get_edges(snake_case_ )
# While there are still elements in the edges list, take an arbitrary edge
# (from_node, to_node), add both of its endpoints to chosen_vertices, and then
# remove all edges adjacent to from_node or to_node
while edges:
_A : Optional[int] = edges.pop()
chosen_vertices.add(snake_case_ )
chosen_vertices.add(snake_case_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(snake_case_ )
return chosen_vertices
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 705
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
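# Illustrative usage (not part of the original tests): floats_list((2, 3)) returns a 2 x 3
# nested list of random floats, each drawn from `rng.random()` and multiplied by `scale`.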
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
# Tests that all calls wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 54
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
_snake_case = logging.get_logger(__name__)
# General docstring
_snake_case = "MobileNetV1Config"
# Base docstring
_snake_case = "google/mobilenet_v1_1.0_224"
_snake_case = [1, 1024, 7, 7]
# Image classification docstring
_snake_case = "google/mobilenet_v1_1.0_224"
_snake_case = "tabby, tabby cat"
_snake_case = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=None ):
_A : Optional[int] = {}
if isinstance(snake_case_,snake_case_ ):
_A : str = model.mobilenet_va
else:
_A : Any = model
_A : List[str] = """MobilenetV1/Conv2d_0/"""
_A : Dict = backbone.conv_stem.convolution.weight
_A : Tuple = backbone.conv_stem.normalization.bias
_A : int = backbone.conv_stem.normalization.weight
_A : int = backbone.conv_stem.normalization.running_mean
_A : Tuple = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_A : Union[str, Any] = i + 1
_A : Tuple = i * 2
_A : int = backbone.layer[pt_index]
_A : List[str] = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
_A : Optional[int] = pointer.convolution.weight
_A : int = pointer.normalization.bias
_A : int = pointer.normalization.weight
_A : Tuple = pointer.normalization.running_mean
_A : Any = pointer.normalization.running_var
_A : List[str] = backbone.layer[pt_index + 1]
_A : Union[str, Any] = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
_A : Optional[int] = pointer.convolution.weight
_A : List[str] = pointer.normalization.bias
_A : Dict = pointer.normalization.weight
_A : List[str] = pointer.normalization.running_mean
_A : str = pointer.normalization.running_var
if isinstance(snake_case_,snake_case_ ):
_A : str = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
_A : Dict = model.classifier.weight
_A : Optional[Any] = model.classifier.bias
return tf_to_pt_map
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
_A : Optional[Any] = tf.train.list_variables(snake_case_ )
_A : List[str] = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''' )
_A : List[Any] = tf.train.load_variable(snake_case_,snake_case_ )
_A : Tuple = array
# Build TF to PyTorch weights loading map
_A : Any = _build_tf_to_pytorch_map(snake_case_,snake_case_,snake_case_ )
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''' )
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
continue
_A : Tuple = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
_A : Optional[int] = np.transpose(snake_case_,(2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
_A : Any = array.squeeze().transpose()
else:
_A : Optional[int] = np.transpose(snake_case_,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
_A : Optional[int] = torch.from_numpy(snake_case_ )
tf_weights.pop(snake_case_,snake_case_ )
tf_weights.pop(name + """/RMSProp""",snake_case_ )
tf_weights.pop(name + """/RMSProp_1""",snake_case_ )
tf_weights.pop(name + """/ExponentialMovingAverage""",snake_case_ )
logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
return model
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Tuple = features.shape[-2:]
_A : Any = conv_layer.stride
_A : Dict = conv_layer.kernel_size
if in_height % stride_height == 0:
_A : Tuple = max(kernel_height - stride_height,0 )
else:
_A : Dict = max(kernel_height - (in_height % stride_height),0 )
if in_width % stride_width == 0:
_A : Any = max(kernel_width - stride_width,0 )
else:
_A : Tuple = max(kernel_width - (in_width % stride_width),0 )
_A : Dict = pad_along_width // 2
_A : Any = pad_along_width - pad_left
_A : Optional[int] = pad_along_height // 2
_A : Tuple = pad_along_height - pad_top
_A : List[str] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(snake_case_,snake_case_,"""constant""",0.0 )
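# Worked example of the TF-style "SAME" padding above (values assumed for illustration):
# for a 7x7 feature map and a 3x3 convolution with stride 2, in_height % stride_height == 1,
# so pad_along_height = max(3 - 1, 0) = 2, split into (pad_top, pad_bottom) = (1, 1); the width
# works out the same way, so the feature map is padded with (1, 1, 1, 1).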
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a , _a = 1 , _a = 1 , _a = False , _a = True , _a = True , ) -> None:
super().__init__()
_A : List[Any] = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
_A : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_A : List[str] = nn.Convad(
in_channels=_a , out_channels=_a , kernel_size=_a , stride=_a , padding=_a , groups=_a , bias=_a , padding_mode="""zeros""" , )
if use_normalization:
_A : Union[str, Any] = nn.BatchNormad(
num_features=_a , eps=config.layer_norm_eps , momentum=0.9997 , affine=_a , track_running_stats=_a , )
else:
_A : Optional[int] = None
if use_activation:
if isinstance(_a , _a ):
_A : List[str] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _a ):
_A : List[str] = ACTaFN[config.hidden_act]
else:
_A : Optional[int] = config.hidden_act
else:
_A : Tuple = None
def a__ ( self , _a ) -> torch.Tensor:
if self.config.tf_padding:
_A : Tuple = apply_tf_padding(_a , self.convolution )
_A : Tuple = self.convolution(_a )
if self.normalization is not None:
_A : Dict = self.normalization(_a )
if self.activation is not None:
_A : str = self.activation(_a )
return features
class lowercase ( UpperCamelCase__ ):
_a = MobileNetVaConfig
_a = load_tf_weights_in_mobilenet_va
_a = "mobilenet_v1"
_a = "pixel_values"
_a = False
def a__ ( self , _a ) -> None:
if isinstance(_a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_snake_case = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a = True ) -> Union[str, Any]:
super().__init__(_a )
_A : Any = config
_A : Optional[int] = 32
_A : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
_A : List[Any] = MobileNetVaConvLayer(
_a , in_channels=config.num_channels , out_channels=_a , kernel_size=3 , stride=2 , )
_A : Dict = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_A : Union[str, Any] = nn.ModuleList()
for i in range(13 ):
_A : int = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_A : int = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=3 , stride=strides[i] , groups=_a , ) )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=1 , ) )
_A : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def a__ ( self , _a ) -> List[Any]:
raise NotImplementedError
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a__ ( self , _a = None , _a = None , _a = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
_A : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_A : str = self.conv_stem(_a )
_A : int = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_A : str = layer_module(_a )
if output_hidden_states:
_A : Optional[Any] = all_hidden_states + (hidden_states,)
_A : Optional[int] = hidden_states
if self.pooler is not None:
_A : int = torch.flatten(self.pooler(_a ) , start_dim=1 )
else:
_A : Dict = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a , pooler_output=_a , hidden_states=_a , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> None:
super().__init__(_a )
_A : Optional[Any] = config.num_labels
_A : Union[str, Any] = MobileNetVaModel(_a )
_A : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_A : List[Any] = nn.Dropout(config.classifier_dropout_prob , inplace=_a )
_A : Optional[Any] = nn.Linear(_a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a__ ( self , _a = None , _a = None , _a = None , _a = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
_A : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_A : Tuple = self.mobilenet_va(_a , output_hidden_states=_a , return_dict=_a )
_A : List[Any] = outputs.pooler_output if return_dict else outputs[1]
_A : Any = self.classifier(self.dropout(_a ) )
_A : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_A : int = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_A : Tuple = """single_label_classification"""
else:
_A : Optional[Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
_A : Any = MSELoss()
if self.num_labels == 1:
_A : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_A : str = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
_A : str = CrossEntropyLoss()
_A : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_A : Tuple = BCEWithLogitsLoss()
_A : Optional[Any] = loss_fct(_a , _a )
if not return_dict:
_A : Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_a , logits=_a , hidden_states=outputs.hidden_states , )
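# Added usage sketch, not part of the original module. It assumes the two classes above are the ones
# transformers exports as MobileNetV1Model / MobileNetV1ForImageClassification, with MobileNetV1Config
# as the matching config class; those exported names are an assumption here, not defined verbatim above.
if __name__ == "__main__":
    from transformers import MobileNetV1Config, MobileNetV1ForImageClassification
    _demo_config = MobileNetV1Config(num_labels=10)
    _demo_model = MobileNetV1ForImageClassification(_demo_config)
    _demo_pixels = torch.randn(1, 3, 224, 224)  # one dummy RGB image at the usual 224x224 resolution
    _demo_outputs = _demo_model(_demo_pixels)
    print(_demo_outputs.logits.shape)  # torch.Size([1, 10])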
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( snake_case_ ):
return data[1:] + data[0]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
    # S-box lookup: the row comes from the outer bits of the 4-bit input, the column from the two middle bits
    _A : int = int("""0b""" + data[0] + data[-1],2 )
    _A : Any = int("""0b""" + data[1:3],2 )
    return bin(s[row][col] )[2:]
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
    # one Feistel round: the arguments are (expansion table, s0, s1, round key, 8-bit message)
    _A : List[str] = message[:4]
    _A : List[Any] = message[4:]
    _A : Union[str, Any] = apply_table(snake_case_,snake_case_ )  # expand/permute the right half
    _A : List[Any] = xor(snake_case_,snake_case_ )  # mix in the round key
    _A : Optional[Any] = apply_sbox(snake_case_,temp[:4] ) # noqa: E741  # S0 on the left nibble
    _A : List[Any] = apply_sbox(snake_case_,temp[4:] )  # S1 on the right nibble
    _A : int = """0""" * (2 - len(snake_case_ )) + l # noqa: E741  # pad each S-box output back to 2 bits
    _A : Union[str, Any] = """0""" * (2 - len(snake_case_ )) + r
    _A : List[Any] = apply_table(l + r,snake_case_ )  # P4 permutation
    _A : Any = xor(snake_case_,snake_case_ )  # combine with the untouched left half
    return temp + right
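# Added worked example of the helpers above (the names follow the call sites used in the driver below):
#   apply_table("1010", [2, 4, 3, 1])  gives "0011"   (output bit i is input bit table[i] - 1)
#   apply_sbox(s0, "1011")             gives "1"      (row = int("11", 2) = 3, col = int("01", 2) = 1, s0[3][1] = 1)
# Inside the round function the 1-bit S-box outputs are left-padded back to 2 bits before the P4 permutation.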
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = tempfile.mkdtemp()
# fmt: off
_A : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
_A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_A : Optional[Any] = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
_A : List[Any] = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_a , _a )
def a__ ( self , **_a ) -> int:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> List[str]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def a__ ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> List[str]:
_A : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_A : int = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ) -> Dict:
_A : Any = self.get_tokenizer()
_A : int = self.get_image_processor()
_A : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_A : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def a__ ( self ) -> int:
_A : List[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_A : Dict = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_A : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def a__ ( self ) -> Optional[int]:
_A : List[Any] = self.get_image_processor()
_A : Optional[Any] = self.get_tokenizer()
_A : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : List[str] = self.prepare_image_inputs()
_A : List[str] = image_processor(_a , return_tensors="""np""" )
_A : Optional[Any] = processor(images=_a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a__ ( self ) -> List[Any]:
_A : Optional[Any] = self.get_image_processor()
_A : Optional[int] = self.get_tokenizer()
_A : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : Dict = """lower newer"""
_A : Tuple = processor(text=_a )
_A : Tuple = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a__ ( self ) -> int:
_A : List[str] = self.get_image_processor()
_A : Any = self.get_tokenizer()
_A : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : Tuple = """lower newer"""
_A : Any = self.prepare_image_inputs()
_A : List[str] = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def a__ ( self ) -> Union[str, Any]:
_A : Dict = self.get_image_processor()
_A : Optional[Any] = self.get_tokenizer()
_A : Dict = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A : int = processor.batch_decode(_a )
_A : Union[str, Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
_A : int = self.get_image_processor()
_A : Any = self.get_tokenizer()
_A : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : int = """lower newer"""
_A : Optional[int] = self.prepare_image_inputs()
_A : int = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
        _A : Any = [
            """\u0009""", # (horizontal tab, '\t')
            """\u000B""", # (vertical tab)
            """\u000C""", # (form feed)
            """\u0020""", # (space, ' ')
            """\u200E""", # (left-to-right mark)
            """\u200F""", # (right-to-left mark)
        ]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a=None , _a=None , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A : Optional[Any] = self.model.config
else:
_A : int = config
_A : Optional[Any] = data_args
_A : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : Union[str, Any] = label_smoothed_nll_loss
def a__ ( self , _a ) -> int:
if self.optimizer is None:
_A : List[str] = ["""bias""", """LayerNorm.weight"""]
_A : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_A : Optional[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_A : Dict = Adafactor
_A : int = {"""scale_parameter""": False, """relative_step""": False}
else:
_A : int = AdamW
_A : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_A : List[str] = self.args.learning_rate
if self.sharded_ddp:
_A : List[str] = OSS(
params=_a , optim=_a , **_a , )
else:
_A : Tuple = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_A : Union[str, Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a__ ( self , _a ) -> Dict:
_A : List[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
# If PAD token is not defined at least EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
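    # Added worked example for the padding helper above (illustration only): with pad_token_id = 0,
    # a (2, 3) tensor of token ids and max_length = 5, the returned tensor has shape (2, 5), keeps the
    # original ids in the first three columns and fills the last two columns with 0.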
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> int:
_A : Tuple = tempfile.mkdtemp()
_A : List[str] = BlipImageProcessor()
_A : List[Any] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
_A : List[str] = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
_A : Any = InstructBlipProcessor(_a , _a , _a )
processor.save_pretrained(self.tmpdirname )
def a__ ( self , **_a ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer
def a__ ( self , **_a ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def a__ ( self , **_a ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).qformer_tokenizer
def a__ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> Optional[Any]:
_A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_A : Optional[int] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_A : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_A : str = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_A : Dict = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
self.assertIsInstance(processor.qformer_tokenizer , _a )
def a__ ( self ) -> Any:
_A : Tuple = self.get_image_processor()
_A : List[str] = self.get_tokenizer()
_A : Tuple = self.get_qformer_tokenizer()
_A : Optional[int] = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Union[str, Any] = self.prepare_image_inputs()
_A : str = image_processor(_a , return_tensors="""np""" )
_A : Optional[int] = processor(images=_a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a__ ( self ) -> Any:
_A : Optional[int] = self.get_image_processor()
_A : Tuple = self.get_tokenizer()
_A : Any = self.get_qformer_tokenizer()
_A : str = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Union[str, Any] = """lower newer"""
_A : Dict = processor(text=_a )
_A : Optional[int] = tokenizer(_a , return_token_type_ids=_a )
_A : int = qformer_tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def a__ ( self ) -> Tuple:
_A : Dict = self.get_image_processor()
_A : str = self.get_tokenizer()
_A : List[Any] = self.get_qformer_tokenizer()
_A : str = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Any = """lower newer"""
_A : Union[str, Any] = self.prepare_image_inputs()
_A : List[Any] = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def a__ ( self ) -> str:
_A : str = self.get_image_processor()
_A : List[Any] = self.get_tokenizer()
_A : List[Any] = self.get_qformer_tokenizer()
_A : Dict = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A : List[Any] = processor.batch_decode(_a )
_A : List[str] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.get_image_processor()
_A : Union[str, Any] = self.get_tokenizer()
_A : int = self.get_qformer_tokenizer()
_A : Dict = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_A : Union[str, Any] = """lower newer"""
_A : Any = self.prepare_image_inputs()
_A : List[Any] = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(snake_case_,snake_case_ ) -> bool:
_A : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A : Optional[int] = mean(
int(is_in_circle(uniform(-1.0,1.0 ),uniform(-1.0,1.0 ) ) )
for _ in range(snake_case_ ) )
# The ratio of the area for circle to square is pi/4.
_A : List[str] = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0.0,snake_case_ = 1.0,):
return mean(
function_to_integrate(uniform(snake_case_,snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = 1.0 ):
def identity_function(snake_case_ ) -> float:
return x
_A : Any = area_under_curve_estimator(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def lowerCAmelCase_ ( snake_case_ ):
def function_to_integrate(snake_case_ ) -> float:
return sqrt(4.0 - x * x )
_A : Optional[int] = area_under_curve_estimator(
snake_case_,snake_case_,0.0,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
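    # Added demo sketch: with roughly 10_000 samples the Monte Carlo estimates above typically land
    # within a few hundredths of the exact values. The names below describe the functions by role,
    # since the defs in this module share one obfuscated name:
    #     pi_estimator(10_000)                         # dart-throwing estimate of pi (first function)
    #     area_under_line_estimator_check(10_000)      # checks the integral of y = x (third function)
    #     pi_estimator_using_area_under_curve(10_000)  # pi from the area under sqrt(4 - x * x) (last function)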
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt"}
_snake_case = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
_snake_case = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
_snake_case = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_INIT_CONFIGURATION
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ConvBertTokenizer
def __init__( self , _a=None , _a=None , _a=True , _a="[UNK]" , _a="[SEP]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , _a=True , _a=None , **_a , ) -> Any:
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
_A : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _a ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _a ) != tokenize_chinese_chars
):
_A : str = getattr(_a , normalizer_state.pop("""type""" ) )
_A : Optional[Any] = do_lower_case
_A : List[str] = strip_accents
_A : Any = tokenize_chinese_chars
_A : Any = normalizer_class(**_a )
_A : Union[str, Any] = do_lower_case
def a__ ( self , _a , _a=None ) -> Dict:
_A : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Optional[int] = [self.sep_token_id]
_A : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self , _a , _a = None ) -> Tuple[str]:
_A : Any = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
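if __name__ == "__main__":
    # Added usage sketch, not part of the original module. It assumes the class above is the one
    # transformers exports as ConvBertTokenizerFast; the checkpoint name comes from the pretrained
    # vocab map at the top of this file.
    from transformers import ConvBertTokenizerFast
    _tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
    _enc = _tok("lower newer")
    print(_enc["input_ids"])  # starts with [CLS] and ends with [SEP], per build_inputs_with_special_tokens above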
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
class lowercase :
def __init__( self , _a , _a , _a ) -> Optional[int]:
_A : Optional[int] = None
_A : Any = None
_A : List[str] = graph
self._normalize_graph(_a , _a )
_A : Tuple = len(_a )
_A : Dict = None
def a__ ( self , _a , _a ) -> int:
        # accept a single vertex index as well as a list of vertices
        if isinstance(sources , int ):
            _A : Optional[Any] = [sources]
        if isinstance(sinks , int ):
            _A : str = [sinks]
if len(_a ) == 0 or len(_a ) == 0:
return
_A : Any = sources[0]
_A : Any = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_a ) > 1 or len(_a ) > 1:
_A : int = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A : int = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A : List[str] = max_input_flow
_A : Any = 0
_A : Dict = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A : List[Any] = max_input_flow
_A : Tuple = size - 1
def a__ ( self ) -> int:
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a__ ( self , _a ) -> Optional[Any]:
_A : Union[str, Any] = algorithm(self )
class lowercase :
def __init__( self , _a ) -> List[str]:
_A : str = flow_network
_A : int = flow_network.verticesCount
_A : Optional[Any] = flow_network.sourceIndex
_A : List[str] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A : List[str] = flow_network.graph
_A : Tuple = False
def a__ ( self ) -> Union[str, Any]:
if not self.executed:
self._algorithm()
_A : List[Any] = True
def a__ ( self ) -> Dict:
pass
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> Dict:
super().__init__(_a )
# use this to save your result
_A : str = -1
def a__ ( self ) -> Tuple:
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> List[Any]:
super().__init__(_a )
_A : int = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A : str = [0] * self.verticies_count
_A : Any = [0] * self.verticies_count
def a__ ( self ) -> Any:
_A : str = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A : Tuple = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A : List[Any] = 0
while i < len(_a ):
_A : List[str] = vertices_list[i]
_A : str = self.heights[vertex_index]
self.process_vertex(_a )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_a ) )
_A : int = 0
else:
i += 1
_A : List[str] = sum(self.preflow[self.source_index] )
def a__ ( self , _a ) -> Tuple:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_a , _a )
self.relabel(_a )
def a__ ( self , _a , _a ) -> Tuple:
_A : str = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A : Optional[int] = self.heights[to_index]
if min_height is not None:
_A : str = min_height + 1
if __name__ == "__main__":
_snake_case = [0]
_snake_case = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
_snake_case = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
_snake_case = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
_snake_case = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
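    # Added sketch: _normalize_graph above also accepts several entrances/exits by inserting a fake
    # super-source and super-sink, so the commented 6-node matrix higher up (call it graph_6, a
    # hypothetical name) can be solved with the same calls:
    #     network = FlowNetwork(graph_6, [0, 1], [4, 5])
    #     network.set_maximum_flow_algorithm(PushRelabelExecutor)
    #     print(network.find_maximum_flow())  # 16: each of the two middle nodes can carry at most 8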
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
self.assertTrue(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
        # Check that the converted list contains the same number of models as the original README.md.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
| 54
| 0
|
from __future__ import annotations
from typing import Any
def lowerCAmelCase_ ( snake_case_ ):
if not postfix_notation:
return 0
_A : Any = {"""+""", """-""", """*""", """/"""}
_A : list[Any] = []
for token in postfix_notation:
if token in operations:
_A : Optional[Any] = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(snake_case_ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
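# Usage sketch (hypothetical call against the anonymized helper above, assuming the original code
# unpacks the two popped operands before applying the operator):
#   lowerCAmelCase_(["2", "1", "+", "3", "*"]) -> (2 + 1) * 3 -> 9
# The "/" branch truncates toward zero: floor division is nudged up by one when the operands have
# opposite signs and leave a remainder. Note that doctest.testmod() above finds nothing to run,
# since the function carries no docstring examples.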
| 712
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
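# Rough illustration of the helper above (an assumption, since the names are anonymized):
#   calling it with shape (2, 8) should return a 2 x 8 nested list of floats drawn from
#   [0.0, scale), using the module-level random.Random() instance when no rng is supplied.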
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 54
| 0
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=32 , _a=3 , _a=4 , _a=[10, 20, 30, 40] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=37 , _a="gelu" , _a=10 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=None , ) -> str:
_A : Optional[int] = parent
_A : List[Any] = batch_size
_A : List[str] = image_size
_A : Optional[int] = num_channels
_A : Optional[Any] = num_stages
_A : Optional[int] = hidden_sizes
_A : Optional[Any] = depths
_A : int = is_training
_A : Any = use_labels
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : Union[str, Any] = num_labels
_A : Dict = initializer_range
_A : Dict = out_features
_A : List[str] = out_indices
_A : int = scope
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Optional[Any] = None
if self.use_labels:
_A : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Dict:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> Any:
_A : int = ConvNextModel(config=_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> Optional[int]:
_A : List[str] = ConvNextForImageClassification(_a )
model.to(_a )
model.eval()
_A : Dict = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> Optional[int]:
_A : str = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Dict = None
_A : Dict = ConvNextBackbone(config=_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> Any:
_A : List[str] = self.prepare_config_and_inputs()
_A : Tuple = config_and_inputs
_A : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : Tuple = ConvNextModelTester(self )
_A : Optional[int] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Any:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[str]:
pass
def a__ ( self ) -> Optional[int]:
_A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Any = model_class(_a )
_A : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : Optional[int] = [*signature.parameters.keys()]
_A : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> int:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Union[str, Any]:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Optional[Any]:
def check_hidden_states_output(_a , _a , _a ):
_A : Any = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = True
check_hidden_states_output(_a , _a , _a )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
_A : Optional[Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> List[str]:
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> List[str]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
'''simple docstring'''
_A : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> Dict:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> List[str]:
_A : List[Any] = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : Optional[Any] = self.default_image_processor
_A : Union[str, Any] = prepare_img()
_A : int = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Tuple = model(**_a )
# verify the logits
_A : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : str = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = ConvNextModelTester(self )
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["OwlViTFeatureExtractor"]
_snake_case = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
| 0
|
def lowerCAmelCase_ ( snake_case_ = 200 ):
_A : Optional[Any] = [1, 2, 5, 10, 20, 50, 100, 200]
_A : List[str] = [0] * (pence + 1)
_A : int = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(snake_case_,pence + 1,1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
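# Worked example of the recurrence above (assuming the base case seeds index 0 with 1, as the
# inline comment indicates): for pence = 5 and coins {1, 2, 5} the table ends at 4 ways,
# namely 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1. Keeping coins in the outer loop counts combinations
# rather than ordered sequences, so 2+1+1+1 and 1+2+1+1 are not double-counted.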
| 715
|
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
            if common_difference % 4: # this quantity equals 4*d, so it must be divisible by 4 for d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # z > 0 requires a > d, and n > 0 requires a < 4*d
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
| 0
|
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
            if common_difference % 4: # this quantity equals 4*d, so it must be divisible by 4 for d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # z > 0 requires a > d, and n > 0 requires a < 4*d
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 716
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def lowerCAmelCase_ ( snake_case_=None ):
_A : List[str] = argparse.ArgumentParser(add_help=snake_case_,allow_abbrev=snake_case_ )
# The main config parser
_A : Union[str, Any] = config_command_parser(snake_case_ )
# The subparser to add commands to
_A : str = config_parser.add_subparsers(title="""subcommands""",dest="""subcommand""" )
# Then add other parsers with the parent parser
default_command_parser(snake_case_,parents=[parent_parser] )
update_command_parser(snake_case_,parents=[parent_parser] )
return config_parser
def lowerCAmelCase_ ( ):
_A : Optional[int] = get_config_parser()
_A : Union[str, Any] = config_parser.parse_args()
if not hasattr(snake_case_,"""func""" ):
config_parser.print_help()
exit(1 )
# Run
args.func(snake_case_ )
if __name__ == "__main__":
main()
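# Rough CLI sketch (assuming this module backs the `accelerate config` entry point):
# parsing `default` or `update` dispatches to the sub-parsers registered above, while a namespace
# without a `func` attribute prints the help text and exits with status 1.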
| 717
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 0
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 718
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 0
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = FunnelTokenizer
_a = FunnelTokenizerFast
_a = True
_a = True
def a__ ( self ) -> List[Any]:
super().setUp()
_A : Union[str, Any] = [
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def a__ ( self , **_a ) -> Dict:
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Optional[Any]:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> Any:
_A : int = """UNwant\u00E9d,running"""
_A : Union[str, Any] = """unwanted, running"""
return input_text, output_text
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self.tokenizer_class(self.vocab_file )
_A : int = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] )
def a__ ( self ) -> Union[str, Any]:
_A : Any = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
_A : str = tokenizer("""UNwant\u00E9d,running""" )
_A : List[Any] = len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )
_A : Tuple = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
| 719
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 54
| 0
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
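# The class below repeats the tokenizer checks for the BigBird-Pegasus variant
# (offset 0, "[MASK]" as the mask token, inputs padded/truncated to 4096 tokens).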
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 720
|
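# Quine-McCluskey minimization: compute the prime implicants of a Boolean function
# from its minterms, then pick the essential prime implicants from a coverage chart.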
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string_a: str, string_b: str) -> str | Literal[False]:
    """Merge two implicants that differ in at most one bit.

    >>> compare_string("0010", "0110")
    '0_10'
    >>> compare_string("0110", "1101")
    False
    """
    list_a = list(string_a)
    list_b = list(string_b)
    count = 0
    for i in range(len(list_a)):
        if list_a[i] != list_b[i]:
            count += 1
            list_a[i] = "_"
    if count > 1:
        return False
    return "".join(list_a)
def check(binary: list[str]) -> list[str]:
    """Derive the prime implicants from the binary minterms."""
    pi = []
    while True:
        check_a = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check_a[i] = "*"
                    check_a[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check_a[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a binary string padded to `no_of_variable` bits.

    >>> decimal_to_binary(3, [1, 5])
    ['001', '101']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string_a: str, string_b: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list_a = list(string_a)
    list_b = list(string_b)
    count_n = 0
    for i in range(len(list_a)):
        if list_a[i] != list_b[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart, then cover the rest greedily."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant marks that implicant as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
                    temp.append(prime_implicants[i])
    # greedily pick the implicant that covers the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] is 1 when implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
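# A non-interactive usage sketch (inputs chosen for illustration):
#     binary = decimal_to_binary(3, [1, 5, 7])              # ['001', '101', '111']
#     prime_implicants = check(binary)
#     chart = prime_implicant_chart(prime_implicants, binary)
#     essential = selection(chart, prime_implicants)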
| 54
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
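# Fast CPU tests for the Stable Diffusion InstructPix2Pix pipeline, built from tiny
# dummy UNet/VAE/CLIP components so they run without a GPU.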
class lowercase ( UpperCamelCase__,UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = StableDiffusionInstructPixaPixPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
_a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self ) -> List[Any]:
torch.manual_seed(0 )
_A : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_A : Union[str, Any] = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
_A : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_A : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_A : Union[str, Any] = CLIPTextModel(_a )
_A : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
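    # Dummy inputs: a seeded 32x32 random image converted to a PIL RGB image, plus a
    # fixed-seed generator so the pipeline output is reproducible.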
def a__ ( self , _a , _a=0 ) -> List[str]:
_A : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
_A : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Tuple = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" )
if str(_a ).startswith("""mps""" ):
_A : str = torch.manual_seed(_a )
else:
_A : Optional[Any] = torch.Generator(device=_a ).manual_seed(_a )
_A : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self ) -> int:
_A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Tuple = self.get_dummy_components()
_A : Dict = StableDiffusionInstructPixaPixPipeline(**_a )
_A : Optional[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_A : Tuple = self.get_dummy_inputs(_a )
_A : Tuple = sd_pipe(**_a ).images
_A : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A : List[str] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def a__ ( self ) -> List[str]:
_A : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Optional[Any] = self.get_dummy_components()
_A : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**_a )
_A : Union[str, Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_A : List[str] = self.get_dummy_inputs(_a )
_A : Optional[int] = """french fries"""
_A : Optional[int] = sd_pipe(**_a , negative_prompt=_a )
_A : Optional[Any] = output.images
_A : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A : str = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def a__ ( self ) -> Dict:
_A : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : int = self.get_dummy_components()
_A : Any = StableDiffusionInstructPixaPixPipeline(**_a )
_A : Optional[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_A : int = self.get_dummy_inputs(_a )
_A : Dict = [inputs["""prompt"""]] * 2
_A : Optional[int] = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
_A : Any = torch.from_numpy(_a ).unsqueeze(0 ).to(_a )
_A : Tuple = image / 2 + 0.5
_A : int = image.permute(0 , 3 , 1 , 2 )
_A : Optional[Any] = image.repeat(2 , 1 , 1 , 1 )
_A : Optional[Any] = sd_pipe(**_a ).images
_A : Tuple = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_A : Optional[int] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
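    # Same pipeline, but with an EulerAncestralDiscreteScheduler swapped in; the
    # reference slice below corresponds to that scheduler.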
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Optional[Any] = self.get_dummy_components()
_A : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
_A : Optional[int] = StableDiffusionInstructPixaPixPipeline(**_a )
_A : Tuple = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_A : Tuple = self.get_dummy_inputs(_a )
_A : List[str] = sd_pipe(**_a ).images
_A : Dict = image[0, -3:, -3:, -1]
        _A : Any = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_A : Dict = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def a__ ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self ) -> int:
_A : int = self.get_dummy_components()
_A : List[str] = StableDiffusionInstructPixaPixPipeline(**_a )
_A : Optional[int] = VaeImageProcessor(do_resize=_a , do_normalize=_a )
_A : Optional[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : str = pipe(**self.get_dummy_inputs_by_type(_a , input_image_type="""pt""" ) )[0]
_A : Any = components["""vae"""]
_A : Any = self.get_dummy_inputs_by_type(_a , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_A : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
_A : Dict = pipe(**_a )[0]
_A : Any = np.abs(out - out_latents_inputs ).max()
self.assertLess(_a , 1e-4 , """passing latents as image input generate different result from passing image""" )
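# The slow tests below run the real timbrooks/instruct-pix2pix checkpoint and
# therefore require a CUDA GPU.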
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self , _a=0 ) -> List[str]:
_A : Optional[int] = torch.manual_seed(_a )
_A : Dict = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
_A : int = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self ) -> Tuple:
_A : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_A : int = self.get_inputs()
_A : Any = pipe(**_a ).images
_A : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_A : str = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def a__ ( self ) -> List[Any]:
_A : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a )
_A : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_A : Union[str, Any] = self.get_inputs()
_A : int = pipe(**_a ).images
_A : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_A : int = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def a__ ( self ) -> Dict:
_A : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a )
_A : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_A : Optional[Any] = self.get_inputs()
_A : Tuple = pipe(**_a ).images
_A : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_A : Any = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def a__ ( self ) -> str:
_A : str = 0
def callback_fn(_a , _a , _a ) -> None:
_A : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_A : List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_A : Dict = latents[0, -3:, -3:, -1]
_A : Union[str, Any] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_A : List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_A : Any = latents[0, -3:, -3:, -1]
_A : Optional[int] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_A : int = False
_A : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a , torch_dtype=torch.floataa )
_A : Optional[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_A : List[str] = self.get_inputs()
pipe(**_a , callback=_a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
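    # Memory test: with attention slicing and sequential CPU offload enabled, peak GPU
    # allocation must stay below 2.2 GB.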
def a__ ( self ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a , torch_dtype=torch.floataa )
_A : str = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A : Any = self.get_inputs()
_A : Tuple = pipe(**_a )
_A : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
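    # Regression test for input resolutions divisible by 8 but not by 16 or 32
    # (the image is resized to 504x504).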
def a__ ( self ) -> Optional[Any]:
_A : Tuple = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_A : str = inputs["""image"""].resize((504, 504) )
_A : List[Any] = """timbrooks/instruct-pix2pix"""
_A : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_A : str = pipe(**_a )
_A : str = output.images[0]
_A : Any = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
_A : List[str] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 721
|
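# Dynamic-programming "all construct": list every sequence of words from the word bank
# that concatenates to the target string.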
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way `target` can be built by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    # adds the word to every combination the current position holds
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # now push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54
| 0
|
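# TAPAS configuration: BERT-style backbone hyperparameters plus the fine-tuning options
# (cell selection, aggregation) used by the SQA/WTQ/WikiSQL/TabFact checkpoints mapped below.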
from ...configuration_utils import PretrainedConfig
_SCREAMING_SNAKE_CASE : List[Any] = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "tapas"
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1024 , UpperCamelCase__=[3, 256, 256, 2, 256, 256, 10] , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__=1_0.0 , UpperCamelCase__=0 , UpperCamelCase__=1.0 , UpperCamelCase__=None , UpperCamelCase__=1.0 , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=1.0 , UpperCamelCase__=1.0 , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__="ratio" , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=64 , UpperCamelCase__=32 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
A__ : Dict = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : str = num_hidden_layers
A__ : Optional[Any] = num_attention_heads
A__ : Dict = hidden_act
A__ : Dict = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : Tuple = max_position_embeddings
A__ : List[Any] = type_vocab_sizes
A__ : Any = initializer_range
A__ : int = layer_norm_eps
# Fine-tuning task hyperparameters
A__ : int = positive_label_weight
A__ : Union[str, Any] = num_aggregation_labels
A__ : Dict = aggregation_loss_weight
A__ : Optional[int] = use_answer_as_supervision
A__ : str = answer_loss_importance
A__ : Tuple = use_normalized_answer_loss
A__ : str = huber_loss_delta
A__ : List[Any] = temperature
A__ : List[str] = aggregation_temperature
A__ : Dict = use_gumbel_for_cells
A__ : int = use_gumbel_for_aggregation
A__ : Union[str, Any] = average_approximation_function
A__ : Optional[int] = cell_selection_preference
A__ : Optional[int] = answer_loss_cutoff
A__ : str = max_num_rows
A__ : Optional[Any] = max_num_columns
A__ : Optional[int] = average_logits_per_cell
A__ : Union[str, Any] = select_one_column
A__ : List[str] = allow_empty_column_selection
A__ : Tuple = init_cell_selection_weights_to_zero
A__ : str = reset_position_index_per_cell
A__ : Optional[int] = disable_per_token_loss
# Aggregation hyperparameters
A__ : List[str] = aggregation_labels
A__ : Optional[int] = no_aggregation_label_index
if isinstance(self.aggregation_labels , UpperCamelCase__ ):
A__ : Union[str, Any] = {int(UpperCamelCase__ ): v for k, v in aggregation_labels.items()}
| 55
|
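# ESM configuration: the main model config plus nested dataclasses (EsmFoldConfig,
# TrunkConfig, StructureModuleConfig) that are only populated for ESMFold folding models.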
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
F" {self.sequence_state_dim} and {self.sequence_state_dim}." )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
F" {self.pairwise_state_dim} and {self.pairwise_state_dim}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 55
| 1
|