"""TER (Translation Edit Rate) metric, built on the sacrebleu implementation."""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__a = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__a = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
__a = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        # sacrebleu expects one stream per reference index, so every prediction
        # must come with the same number of references; the list of lists is
        # then transposed before being handed to sacrebleu.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
# =============================================================================
"""Convert DeiT distilled checkpoints from the timm library to the HuggingFace format."""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights to our DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
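
# Example invocation (the script filename and output folder are illustrative;
# pass any distilled DeiT checkpoint name known to timm):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224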
# =============================================================================
"""Tests for the activation functions in transformers.activations."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        # `get_activation` returns a fresh object each time, so the attribute
        # set on `act1` must not exist on `act2`.
        with self.assertRaises(AttributeError):
            _ = act2.a
# =============================================================================
"""Compute term frequency, document frequency, inverse document frequency, and tf-idf."""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents in `corpus` containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
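

if __name__ == "__main__":
    # Worked example (my own illustrative corpus, not from the original file):
    # documents are newline-separated, matching document_frequency's tokenization.
    corpus = (
        "this is the first document\n"
        "this document is the second document\n"
        "and this is the third one"
    )
    tf = term_frequency("document", "this document is the second document")  # -> 2
    df, n = document_frequency("document", corpus)  # -> (2, 3)
    idf = inverse_document_frequency(df, n)  # -> round(log10(3 / 2), 3) = 0.176
    print(tf_idf(tf, idf))  # -> 0.352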
# =============================================================================
"""Rayleigh quotient of a Hermitian matrix: https://en.wikipedia.org/wiki/Rayleigh_quotient"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient v* A v / (v* v) of a Hermitian matrix `a` and a vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
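
# Hand-check of the final assertion above (my own arithmetic, not from the
# original file): with a = [[1, 2, 4], [2, 3, -1], [4, -1, 1]] and v = [1, 2, 3],
# a @ v = [17, 5, 5], so v.T @ a @ v = 17 + 10 + 15 = 42 and v.T @ v = 14,
# giving a Rayleigh quotient of 42 / 14 = 3.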
# =============================================================================
"""Tests for the TensorFlow ResNet model."""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
# =============================================================================
"""Tests for the AutoImageProcessor auto class."""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
# =============================================================================
"""GLPN model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
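
# Minimal usage sketch (not part of the original file): a GLPNConfig is built
# like any other PretrainedConfig, with keyword overrides for its fields.
#
#     from transformers import GLPNConfig
#     config = GLPNConfig(drop_path_rate=0.2)
#     print(config.hidden_sizes)  # [32, 64, 160, 256]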
# =============================================================================
"""Benchmark iterating over a generated datasets.Dataset, plain and formatted, single rows and batches."""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
# =============================================================================
"""Tokenization classes for RoFormer (fast tokenizer, backed by HuggingFace tokenizers)."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer cannot be pickled, so swap in the
        # default BERT pre-tokenizer and restore Jieba in __setstate__
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
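
# Minimal usage sketch (not part of the original file); the Jieba
# pre-tokenizer requires the `rjieba` package to be installed:
#
#     from transformers import RoFormerTokenizerFast
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     print(tokenizer.tokenize("今天天气非常好。"))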
# =============================================================================
"""Exact match metric: the rate at which predictions exactly match their references."""
import re
import string
import numpy as np
import datasets
__a = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
__a = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
__a = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
# =============================================================================
"""Tests for datasets' FileLock utility."""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # the lock file name is hashed/truncated so it stays within the common
    # 255-character filesystem limit
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
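
# Minimal usage sketch of the API under test (not part of the original file;
# the lock path is illustrative):
#
#     lock = FileLock("/tmp/example.lock")
#     with lock.acquire(timeout=1):
#         ...  # only one process at a time runs this block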
# =============================================================================
"""BEiT model configuration."""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
# =============================================================================
"""Sum of the first n terms of an arithmetic series."""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
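
# The closed form above is the standard arithmetic-series sum
#     S_n = n / 2 * (2 * a1 + (n - 1) * d)
# Quick check for a1 = 1, d = 1, n = 10 (the `main` example):
# S_10 = 5 * (2 + 9) = 55, matching the first doctest.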
# =============================================================================
"""Tests for the Flax RoFormer model."""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 35
|
'''simple docstring'''
TEXT_TO_IMAGE_PARAMS = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset([])
__a = frozenset(["image"])
__a = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image"])
__a = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image", "mask_image"])
__a = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["example_image", "image", "mask_image"])
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
| 35
| 1
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    # All anagrams of a word share the same sorted-letter signature.
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    # Return every word in the list with the same signature.
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
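# Example of the signature/anagram mechanics (illustrative; the real word list
# comes from the words.txt file next to this script):
#     signature("dog") == signature("god") == "dgo"
# so "dog" and "god" land in the same word_by_signature bucket, and
# anagram("dog") returns every word that shares that bucket.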
| 35
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(x_token_1)
        output_text_2 = tokenizer.decode(x_token_2)
        output_text_3 = tokenizer.decode(x_token_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 35
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
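# Shape sketch for make_batched (illustrative): a single image becomes
# [[image]], a flat list of frames becomes [frames], and a list of videos
# (a list of lists of frames) is returned unchanged, so callers can always
# iterate `for video in videos: for frame in video: ...`.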
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 35
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer

    pass
| 35
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
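    # Note (illustrative): the provider tuple above caps the CUDA memory arena at
    # roughly 15 GB, and onnxruntime's kSameAsRequested arena strategy is typically
    # paired with enable_mem_pattern = False, which is what gpu_options configures.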
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 35
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs: np.ndarray) -> np.ndarray:
    # Subtract the row-wise max for numerical stability before exponentiating.
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35
| 1
|
'''simple docstring'''
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__a = input("Enter numbers separated by a comma:\n").strip()
__a = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
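# Worked example (illustrative): counting_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5].
# The counting array spans max + 1 - min = 6 slots here, so the sort runs in
# O(n + k) time, where k is the range of values.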
| 35
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R"| /\ | |- | |- |--| |\ /| |-")
print(R"|/ \| |- |_ |_ |__| | \/ | |_")
__a = 1
while K:
__a = int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
__a = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 35
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
| 35
| 1
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    # SiLU / swish activation: x * sigmoid(x)
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
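# Numeric checks (illustrative):
#     sigmoid(np.array([0.0]))  -> [0.5]
#     swish(np.array([0.0]))    -> [0.0]   (x * sigmoid(x) vanishes at 0)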
| 35
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
| 1
|
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """Convert an integer to a binary string prefixed with 0b, like bin()."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
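# Examples (illustrative), matching Python's built-in bin():
#     decimal_to_binary(0)   -> "0b0"
#     decimal_to_binary(10)  -> "0b1010"
#     decimal_to_binary(-10) -> "-0b1010"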
| 35
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
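# Note (illustrative): 128 + level + (c - 128) simplifies to c + level, so the
# point operation is a uniform additive shift; level=100 maps pixel value 50
# to 150, with PIL clipping results to the valid 0-255 range.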
| 35
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
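# Size sketch (illustrative): the "small" checkpoint maps to hidden_size=1024,
# 24 layers and 16 heads, so the resulting MusicgenDecoderConfig gets
# ffn_dim = 4 * 1024 = 4096.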
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 35
|
'''simple docstring'''
import argparse
import os
import re
__a = "src/transformers"
# Pattern that looks at the indentation in a line.
__a = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__a = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__a = re.compile(R"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
snake_case__ : str = 0
snake_case__ : Union[str, Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCAmelCase ):
index += 1
snake_case__ : Tuple = ["""\n""".join(lines[:index] )]
else:
snake_case__ : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case__ : Optional[int] = [lines[index]]
index += 1
while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCAmelCase ) )
if index < len(_lowerCAmelCase ) - 1:
snake_case__ : str = [lines[index + 1]]
index += 1
else:
snake_case__ : int = []
else:
blocks.append("""\n""".join(_lowerCAmelCase ) )
snake_case__ : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCAmelCase ) > 0:
blocks.append("""\n""".join(_lowerCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCAmelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
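# Illustrative ordering: sort_objects(["MODEL_MAPPING", "my_function", "MyClass"])
# returns ["MODEL_MAPPING", "MyClass", "my_function"]: constants first, then
# classes, then functions, each group sorted with underscores and case ignored.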
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 35
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["GLPNFeatureExtractor"]
__a = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
| 1
|
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("Example of four vector: ")
print(F"ct' = {four_vector[0]}")
print(F"x' = {four_vector[1]}")
print(F"y' = {four_vector[2]}")
print(F"z' = {four_vector[3]}")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
| 35
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
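# Example invocation (all paths below are placeholders, not from the original
# script):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2
#
# The dumped folder can then be reloaded with the standard API, e.g.
# `SpeechEncoderDecoderModel.from_pretrained("./wav2vec2-2-speech2text2")`.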
| 35
| 1
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 35
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module corresponding to a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all `*ModelTester` classes defined in a model test module."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes (those with a non-empty `all_model_classes`) in a model test module."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in a model test module."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class of a test class, or `None` if there is none."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Collect the test classes that cover `model_class`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Collect the model tester classes of the test classes that cover `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in a model test module to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the model tester classes covering it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the mapping JSON serializable: class objects become their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
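# Hypothetical usage sketch (the test file path is just an example): print a
# JSON-friendly mapping from each model class to its model tester classes.
if __name__ == "__main__":
    mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
    print(to_json(mapping))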
| 35
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
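# Minimal usage sketch: with the default 4 stages the derived hidden size is
# embed_dim * 2 ** 3, e.g. 96 * 8 = 768.
#
#   config = Swinv2Config(image_size=256, window_size=8)
#   assert config.hidden_size == 768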
| 35
|
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__a = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
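# Sketch of reloading the converted checkpoint (folder name is a placeholder):
#
#   from transformers import SwinForImageClassification
#   model = SwinForImageClassification.from_pretrained("./swin_tiny_patch4_window7_224")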
| 35
| 1
|
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    # Three points are collinear exactly when the cross product of AB and AC
    # is the zero vector (up to the rounding `accuracy`).
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
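# Usage sketch (points chosen for illustration):
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True: all on the line x = y = z
    print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False: AB x AC = (0, 0, 1) != 0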
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 35
| 0
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
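# Minimal usage sketch (checkpoint name taken from the map above):
#
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   print(tokenizer("Hello world")["input_ids"])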
| 1
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
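# Minimal usage sketch (`output_dir` is a placeholder): generation-specific
# settings ride along with the usual TrainingArguments fields.
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="./out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )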
| 35
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
    'processing_mgp_str': ['MgpstrProcessor'],
    'tokenization_mgp_str': ['MgpstrTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mgp_str'] = [
        'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MgpstrModel',
        'MgpstrPreTrainedModel',
        'MgpstrForSceneTextRecognition',
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 35
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 3
|
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    # Count how many times `term` appears in `document` (case-insensitive).
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    # Return (number of documents containing `term`, total number of documents),
    # where documents in `corpus` are separated by newlines.
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
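# Worked example (tiny corpus invented for illustration):
if __name__ == "__main__":
    corpus = "this is the first document\nthis is the second document\nand the third one"
    tf = term_frequency("first", "this is the first document")  # 1
    df, n = document_frequency("first", corpus)  # (1, 3)
    idf = inverse_document_frequency(df, n)  # round(log10(3 / 1), 3) = 0.477
    print(tf_idf(tf, idf))  # 0.477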
| 35
| 0
|
'''simple docstring'''
from collections import Counter
from timeit import timeit
def a_ ( lowerCamelCase : str = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def a_ ( lowerCamelCase : str = "" ):
if len(lowerCamelCase ) == 0:
return True
lowerCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowerCAmelCase = {}
for character in lower_case_input_str:
lowerCAmelCase = character_freq_dict.get(lowerCamelCase , 0 ) + 1
lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def a_ ( lowerCamelCase : str = "" ):
print('\nFor string = ' , lowerCamelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(lowerCamelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(lowerCamelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
__snake_case =input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
__snake_case =can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 4
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
"""simple docstring"""
def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ):
snake_case__ : List[Any] = parent
snake_case__ : List[Any] = batch_size
snake_case__ : int = image_size
snake_case__ : List[Any] = num_channels
snake_case__ : Optional[Any] = embeddings_size
snake_case__ : Optional[int] = hidden_sizes
snake_case__ : Tuple = depths
snake_case__ : Any = is_training
snake_case__ : Optional[int] = use_labels
snake_case__ : Optional[int] = hidden_act
snake_case__ : Optional[int] = num_labels
snake_case__ : int = scope
snake_case__ : Tuple = len(snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : int ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ )
snake_case__ : int = model(snake_case_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ):
snake_case__ : str = self.num_labels
snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ )
snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : Tuple ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs
snake_case__ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Tuple = TFResNetModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowerCamelCase ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : str ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def lowerCamelCase ( self : int ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def lowerCamelCase ( self : List[Any] ):
pass
def lowerCamelCase ( self : List[Any] ):
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(snake_case_ )
snake_case__ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self : List[str] ):
def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ):
snake_case__ : List[Any] = model_class(snake_case_ )
snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ : Dict = layer_type
snake_case__ : Optional[int] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[Any] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase ( self : Optional[Any] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def __snake_case( ) -> Optional[int]:
snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def lowerCamelCase ( self : List[Any] ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : List[Any] = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[Any] = model(**snake_case_ )
# verify the logits
snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
| 35
| 0
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sort `a[start:end + 1]` in place and return the number of comparisons."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partition around a random pivot; return (pivot index, comparison count)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted


mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)


outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z)
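# Extra sanity check (not in the original): the array must be sorted afterwards,
# and the comparison count for n random keys grows as O(n log n).
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1)), "array is not sorted"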
| 5
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
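# Minimal usage sketch: the defaults reproduce a GLPN-KITTI-like encoder.
#
#   config = GLPNConfig()
#   assert config.hidden_sizes == [32, 64, 160, 256]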
| 35
| 0
|
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 6
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""_tokenizer"""].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__["""_tokenizer"""].get_vocab()
        self.__dict__["""_tokenizer"""].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
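# Worked sketch (illustrative ids, not from a real vocab): with cls_token_id=101 and
# sep_token_id=102, a pair of tokenized sequences [7, 8] and [9] is assembled as
#   build_inputs_with_special_tokens([7, 8], [9])      -> [101, 7, 8, 102, 9, 102]
#   create_token_type_ids_from_sequences([7, 8], [9])  -> [0, 0, 0, 0, 1, 1]
# i.e. segment 0 covers [CLS] A [SEP] and segment 1 covers B [SEP].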
| 35
| 0
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file,'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file,'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_input_output_texts( self,tokenizer ):
        '''simple docstring'''
        return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
'''simple docstring'''
        tokenizer = OpenAIGPTTokenizer(self.vocab_file,self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens,bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ),input_bpe_tokens )
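    # Worked sketch of the merge table above (not part of the original test):
    # 'lower' starts as 'l o w e r</w>'; merge 'l o' -> 'lo', then 'lo w' -> 'low',
    # then 'e r</w>' -> 'er</w>', giving ['low', 'er</w>'].
    # 'lowest' stops at ['low', 'e', 's', 't</w>'] because no further merge applies.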
    def test_padding( self,max_length=1_5 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name,**kwargs )
                # Simple input
                s = 'This is a simple input'
                s_a = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p_a = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError,tokenizer_r.encode,s,max_length=max_length,padding='max_length' )
                # Simple input
                self.assertRaises(ValueError,tokenizer_r.encode_plus,s,max_length=max_length,padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError,tokenizer_r.batch_encode_plus,s_a,max_length=max_length,padding='max_length',)
                # Pair input
                self.assertRaises(ValueError,tokenizer_r.encode,p,max_length=max_length,padding='max_length' )
                # Pair input
                self.assertRaises(ValueError,tokenizer_r.encode_plus,p,max_length=max_length,padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError,tokenizer_r.batch_encode_plus,p_a,max_length=max_length,padding='max_length',)
    def test_padding_different_model_input_name( self ):
        '''simple docstring'''
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy ( OpenAIGPTTokenizationTest ):
"""simple docstring"""
pass
| 7
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    locka = FileLock(str(tmpdir / """foo.lock""" ) )
    lockb = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path( tmpdir ):
    filename = """a""" * 1_000 + """.lock"""
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
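# Usage sketch (assumes the same filelock API exercised above): a second handle on
# the same lock path times out while the first holder keeps the lock.
#
# lock = FileLock("demo.lock")
# with lock.acquire():
#     try:
#         FileLock("demo.lock").acquire(timeout=0.01)
#     except Timeout:
#         pass  # expected: the lock is already held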
| 35
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
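# A minimal sketch of the deferred-import idea behind _import_structure, using plain
# PEP 562 module-level __getattr__ rather than transformers' _LazyModule (an assumption,
# shown commented out so it does not conflict with the sys.modules replacement above):
#
# import importlib
# def __getattr__(name):
#     for submodule, symbols in _import_structure.items():
#         if name in symbols:
#             return getattr(importlib.import_module("." + submodule, __name__), name)
#     raise AttributeError(name)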
| 8
|
'''simple docstring'''
def sum_of_series( first_term , common_diff , num_of_terms ) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series
    return total
def main() -> None:
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
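# Worked check (not in the original): for first_term=1, common_diff=1, num_of_terms=10
# the closed form gives (10 / 2) * (2 * 1 + 9 * 1) = 55.0, matching the term-by-term sum.
assert sum_of_series(1, 1, 10) == sum(range(1, 11))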
| 35
| 0
|
from math import ceil
def solution( n = 1001 ):
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
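# Cross-check (not in the original): the diagonals of a 5x5 number spiral are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
assert solution(5) == 101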
| 9
|
'''simple docstring'''
TEXT_TO_IMAGE_PARAMS = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset([])
__a = frozenset(["image"])
__a = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image"])
__a = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image", "mask_image"])
__a = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["example_image", "image", "mask_image"])
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
| 35
| 0
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid, mapping any real value into (0, 1)."""
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    """Binary cross-entropy between predicted probabilities h and labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    """Log-likelihood of labels y under a logistic model with the given weights."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit logistic regression by batch gradient descent with learning rate alpha."""
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"""loss: {j} \t""")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("theta: ", theta) # printing the theta i.e our weights vector
def predict_prob(x):
    """Predicted probability of class 1 for inputs x under the fitted weights theta."""
    return sigmoid_function(
        np.dot(x, theta) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
(xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
(xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
(xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
grid = np.c_[xxa.ravel(), xxb.ravel()]
probs = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
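# Cross-check sketch (not in the original; assumes scikit-learn >= 1.2 for penalty=None):
# an unregularized sklearn fit on the same two iris features should give a decision
# boundary close to the one found by the hand-rolled gradient descent above.
#
# from sklearn.linear_model import LogisticRegression
# clf = LogisticRegression(penalty=None).fit(x, y)
# print(clf.coef_, clf.intercept_)  # compare with theta (note: theta has no intercept term)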
| 10
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp( self ):
        super().setUp()
        # fmt: off
        vocab_tokens = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        emoji_tokens = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        output_text = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_pretokenized_inputs( self ):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input( self ):
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input( self ):
        pass  # TODO add if relevant
    def test_full_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、世界。 こんばんは、㔺界。"""
        expected_token = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_token )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
    def test_token_bagging( self ):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        expected_text = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        tokens = tokenizer.encode(input_text )
        output_text = tokenizer.decode(tokens )
        self.assertEqual(output_text , expected_text )
    @slow
    def test_prefix_input_token( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        prefix_text = """こんにちは、世界。"""
        input_text = """こんばんは、㔺界。😀"""
        expected_text = """こんにちは、世界。こんばんは、世界。😀"""
        tokens_a = tokenizer.encode(prefix_text + input_text )
        tokens_b = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        tokens_c = tokenizer.encode(input_text , prefix_text=prefix_text )
        output_a = tokenizer.decode(tokens_a )
        output_b = tokenizer.decode(tokens_b )
        output_c = tokenizer.decode(tokens_c )
        self.assertEqual(output_a , expected_text )
        self.assertEqual(output_b , expected_text )
        self.assertEqual(output_c , expected_text )
    @slow
    def test_token_type_ids( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        prefix_text = """こんにちは、世界。"""
        input_text = """こんばんは、㔺界。😀"""
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_a = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_b = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_c = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_a = tokenizer(prefix_text + input_text ).token_type_ids
        type_ids_b = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        type_ids_c = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_ids_a , expected_mask_a )
        self.assertListEqual(type_ids_b , expected_mask_b )
        self.assertListEqual(type_ids_c , expected_mask_c )
    @slow
    def test_prefix_tokens( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        x_token_a = tokenizer.encode("""あンいワ""" )
        x_token_b = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        x_token_c = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(x_token_a ) , tokenizer.decode(x_token_b ) )
        self.assertEqual(tokenizer.decode(x_token_a ) , tokenizer.decode(x_token_c ) )
        self.assertNotEqual(x_token_a , x_token_b )
        self.assertNotEqual(x_token_a , x_token_c )
        self.assertEqual(x_token_a[1] , x_token_b[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_c[3] )  # SEG token
    @slow
    def test_batch_encode( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        input_pairs = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_a = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        expected_outputs = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs )
        self.assertListEqual(x_token.token_type_ids , expected_typeids )
        self.assertListEqual(x_token.attention_mask , expected_attmask )
        self.assertListEqual(x_token_a.input_ids , expected_outputs )
        self.assertListEqual(x_token_a.token_type_ids , expected_typeids )
        self.assertListEqual(x_token_a.attention_mask , expected_attmask )
    def test_conversion_reversible( self ):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def test_padding_different_model_input_name( self ):
        # tokenizer has no padding token
        pass
| 35
| 0
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader( AbstractDatasetReader):
    '''simple docstring'''
    def __init__( self , path_or_paths: NestedDataStructureLike[PathLike] , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory)
        return dataset
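# Usage sketch (an assumption; this reader backs the `datasets` "text" loader, which
# yields one example per line of the input file):
#
# from datasets import load_dataset
# ds = load_dataset("text", data_files={"train": "my_corpus.txt"})
# # roughly equivalent to: TextDatasetReader("my_corpus.txt", split="train").read()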
| 11
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast( BertTokenizerFast ):
    """simple docstring"""
    slow_tokenizer_class = CustomTokenizer
    pass
| 35
| 0
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        return model
@property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config )
@property
    def dummy_extractor( self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )
                def to( self , device ):
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.dummy_cond_unet
__lowerCamelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = """A painting of a squirrel eating a burger"""
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=UpperCamelCase_ , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.dummy_cond_unet
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = """A painting of a squirrel eating a burger"""
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=UpperCamelCase_ , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(pipe.scheduler , UpperCamelCase_ )
assert pipe.safety_checker is None
__lowerCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = StableDiffusionPipeline.from_pretrained(UpperCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowerCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.dummy_cond_unet
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__lowerCamelCase = unet.half()
__lowerCamelCase = vae.half()
__lowerCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = """A painting of a squirrel eating a burger"""
__lowerCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=UpperCamelCase_ )
__lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__lowerCamelCase = 40_03_66_03_46
__lowerCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=UpperCamelCase_ )
__lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
__lowerCamelCase = 27_34_97_17_55
__lowerCamelCase = 7
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__lowerCamelCase = 10_44_35_52_34
__lowerCamelCase = 12
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 12
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline( Pipeline ):
    """simple docstring"""
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["""second_text"""] = kwargs["""second_text"""]
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model( model , input , target , accelerator , do_backward=True ):
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup( accelerator , sched=False ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
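# A minimal sketch (plain PyTorch, no Accelerate) of the manual gradient-accumulation
# pattern the tests below verify: scale the loss down by the accumulation factor and
# only step/zero the optimizer every `accum_steps` batches.
#
# accum_steps = 2
# for step, batch in enumerate(dataloader):
#     inp, tgt = batch.values()
#     loss = F.mse_loss(model(inp), tgt) / accum_steps
#     loss.backward()
#     if (step + 1) % accum_steps == 0:
#         opt.step()
#         opt.zero_grad()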
def test_noop_sync( accelerator ):
    # Test when on a single CPU or GPU that the context manager does nothing
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync( accelerator ):
    # Test on distributed setup that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation( split_batches=False , dispatch_batches=False ):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False , dispatch_batches=False ):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 13
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd( n ):
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(""" """ , end="""""" )
        for _ in range(0 , i + 1 ):  # printing stars
            print("""* """ , end="""""" )
        print()
def reverse_floyd( n ):
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print("""* """ , end="""""" )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(""" """ , end="""""" )
def pretty_print( n ):
    if n <= 0:
        print("""       ...       ....        nothing printing :(""" )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
print(R"| /\ | |- | |- |--| |\ /| |-")
print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 35
| 0
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""openbmb/cpm-ant-10b""": 1024,
}
def load_vocab( vocab_file ):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file , '''r''' , encoding='''utf-8''' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('''\n''' )
        vocab[token] = index
    return vocab
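# Usage sketch (hypothetical file contents): a vocab.txt with the lines
# "<unk>", "hello", "world" yields OrderedDict([("<unk>", 0), ("hello", 1), ("world", 2)]).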
class WordpieceTokenizer(object):
    '''simple docstring'''
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200):
        '''simple docstring'''
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self , token):
        '''Greedy longest-match-first tokenization of a single token against the vocab.'''
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
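    # Worked sketch (not part of the original): with a vocab containing "un", "break"
    # and "able", tokenize("unbreakable") returns ["un", "break", "able"]: the inner
    # loop shrinks `end` until "un" matches, then restarts from start = 2, and so on.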
class CpmAntTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    add_prefix_space = False
    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        '''simple docstring'''
        requires_backends(self , ['''jieba'''])
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string with jieba, then refine each chunk with WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
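    # Illustrative layout (added, not in the original file): a pair of
    # sequences A and B is encoded as [bos] A [bos] B — each segment is
    # prefixed with the BOS id rather than joined by a separator token.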
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 14
|
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
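# Quick check (added, not in the original file): for n = 10 the multiples of
# 3 or 5 below 10 are 3, 5, 6 and 9, so solution(10) == 23.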
if __name__ == "__main__":
print(F"{solution() = }")
| 35
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
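# Illustrative usage (added, not in the original file):
#     config = TimeSeriesTransformerConfig(prediction_length=24)
# feature_size is then input_size * len(lags_sequence) plus the count
# returned by `_number_of_features` above (embeddings, real features, and
# the two log-scale features).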
| 15
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
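# Note (added, not part of the original file): replacing the module in
# sys.modules with a _LazyModule defers the heavy torch/tokenizers imports
# until an attribute such as BloomModel is first accessed.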
| 35
| 0
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
    'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ctrl': 256,
}

CONTROL_CODES = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
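# Illustrative example (added, not in the original file):
# get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}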
class CTRLTokenizer(PreTrainedTokenizer):
    """CTRL BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
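    # Illustrative example (added, not in the original file): for a token
    # "hello" whose best-ranked merges yield the pieces ("hell", "o</w>"),
    # bpe returns "hell@@ o" — the "@@ " marker means the piece continues
    # into the next one; the trailing "</w>" end-of-word marker is stripped.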
    def _tokenize(self, text):
        """Tokenize a string: whitespace-split, then apply BPE to each token."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 16
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by adding `level` to every channel value."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
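# Note (added, not in the original file): brightness(c) simplifies to
# c + level; Image.point applies it to every pixel channel value.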
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 35
| 0
|
"""simple docstring"""
def mean_absolute_deviation(nums: list[int]) -> float:
    """Return the mean absolute deviation of a list of numbers."""
    if not nums:  # make sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
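# Worked example (added, not in the original file):
# mean_absolute_deviation([1, 2, 3, 4]) -> average = 2.5,
# |1-2.5| + |2-2.5| + |3-2.5| + |4-2.5| = 4.0, so the result is 1.0.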
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17
|
'''simple docstring'''
import argparse
import os
import re
__a = "src/transformers"
# Pattern that looks at the indentation in a line.
__a = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__a = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__a = re.compile(R"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Return the indentation of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
snake_case__ : str = 0
snake_case__ : Union[str, Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCAmelCase ):
index += 1
snake_case__ : Tuple = ["""\n""".join(lines[:index] )]
else:
snake_case__ : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case__ : Optional[int] = [lines[index]]
index += 1
while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCAmelCase ) )
if index < len(_lowerCAmelCase ) - 1:
snake_case__ : str = [lines[index + 1]]
index += 1
else:
snake_case__ : int = []
else:
blocks.append("""\n""".join(_lowerCAmelCase ) )
snake_case__ : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCAmelCase ) > 0:
blocks.append("""\n""".join(_lowerCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCAmelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wrap a key function so that leading underscores and case are ignored."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
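# Illustrative ordering (added, not in the original file):
# sort_objects(["foo", "Bar", "BAZ"]) returns ["BAZ", "Bar", "foo"] —
# constants first, then classes, then functions.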
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` entries of one init file (optionally only checking)."""
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 35
| 0
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids
        )
        outputs = model(input_ids)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
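    # Note (added, not in the original file): both checks above compare the
    # last-position logits of an incremental (cached) forward pass against a
    # single full forward pass; they should agree to within 1e-3.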
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 18
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
| 0
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
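# Note (added, not part of the original script): make_linear_from_emb turns a
# tied embedding matrix into an output projection by sharing its weight data
# with a bias-free nn.Linear.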
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak a fairseq BART checkpoint into the transformers structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 19
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
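# Illustrative example (added, not in the original script): a fairseq dict
# file whose lines are "hello 123" and "world 45" yields
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.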
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 35
| 0
|
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest
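# Note (added, not in the original file): largest_product scans every run of
# four adjacent numbers (vertical, horizontal and both diagonals) and keeps
# the largest product found, as in Project Euler problem 11.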
def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 20
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
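# Illustrative mapping (added, not in the original file):
# "tests/models/bert/test_modeling_bert.py" -> "tests.models.bert.test_modeling_bert"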
def get_test_module(test_file):
    """Import and return the test module of `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : List[Any] = []
snake_case__ : Optional[int] = get_test_module(_lowerCAmelCase )
for attr in dir(_lowerCAmelCase ):
if attr.endswith("""ModelTester""" ):
tester_classes.append(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
# sort with class names
return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase ) -> Dict:
snake_case__ : List[str] = []
snake_case__ : Any = get_test_module(_lowerCAmelCase )
for attr in dir(_lowerCAmelCase ):
snake_case__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
snake_case__ : List[str] = getattr(_lowerCAmelCase , """all_model_classes""" , [] )
if len(_lowerCAmelCase ) > 0:
test_classes.append(_lowerCAmelCase )
# sort with class names
return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase ) -> Dict:
snake_case__ : Any = get_test_classes(_lowerCAmelCase )
snake_case__ : Optional[Any] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
snake_case__ : Optional[int] = test_class()
if hasattr(_lowerCAmelCase , """setUp""" ):
test.setUp()
snake_case__ : Any = None
if hasattr(_lowerCAmelCase , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
snake_case__ : Tuple = test.model_tester.__class__
return model_tester
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
snake_case__ : Union[str, Any] = get_test_classes(_lowerCAmelCase )
snake_case__ : str = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_lowerCAmelCase )
# sort with class names
return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
snake_case__ : Optional[Any] = get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase )
snake_case__ : Union[str, Any] = []
for test_class in test_classes:
snake_case__ : Tuple = get_model_tester_from_test_class(_lowerCAmelCase )
if tester_class is not None:
tester_classes.append(_lowerCAmelCase )
# sort with class names
return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase ) -> Union[str, Any]:
snake_case__ : Optional[Any] = get_test_classes(_lowerCAmelCase )
snake_case__ : Union[str, Any] = {test_class: get_model_tester_from_test_class(_lowerCAmelCase ) for test_class in test_classes}
return test_tester_mapping
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : Any = get_model_classes(_lowerCAmelCase )
snake_case__ : Any = {
model_class: get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes
}
return model_test_mapping
def __snake_case( _lowerCAmelCase ) -> Optional[int]:
snake_case__ : Union[str, Any] = get_model_classes(_lowerCAmelCase )
snake_case__ : str = {
model_class: get_tester_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes
}
return model_to_tester_mapping
def __snake_case( _lowerCAmelCase ) -> int:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return o
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return o.__name__
elif isinstance(_lowerCAmelCase , (list, tuple) ):
return [to_json(_lowerCAmelCase ) for x in o]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {to_json(_lowerCAmelCase ): to_json(_lowerCAmelCase ) for k, v in o.items()}
else:
return o
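# Minimal usage sketch (assumes a transformers checkout; the model/test-file names
# are illustrative):
#   test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   test_classes = get_test_classes(test_file)          # e.g. [BertModelTest, ...]
#   mapping = get_model_to_tester_mapping(test_file)    # {BertModel: [BertModelTester], ...}
#   print(to_json(mapping))                             # class objects rendered as their names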
| 35
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        """simple docstring"""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        """simple docstring"""
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
"""simple docstring"""
_lowercase : int = 'sgugger/tiny-distilbert-classification'
_lowercase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase, only_pretrain_model=lowerCamelCase, )
_lowercase : Any = PyTorchBenchmark(lowerCamelCase)
_lowercase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_torchscript(self):
"""simple docstring"""
_lowercase : List[Any] = 'sshleifer/tiny-gpt2'
_lowercase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, torchscript=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase, )
_lowercase : Optional[Any] = PyTorchBenchmark(lowerCamelCase)
_lowercase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        """simple docstring"""
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
"""simple docstring"""
_lowercase : int = 'sshleifer/tiny-gpt2'
_lowercase : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase)
# set architectures equal to `None`
_lowercase : Tuple = None
_lowercase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase, )
_lowercase : str = PyTorchBenchmark(lowerCamelCase, configs=[config])
_lowercase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
"""simple docstring"""
_lowercase : List[str] = 'sshleifer/tiny-gpt2'
_lowercase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase, )
_lowercase : Dict = PyTorchBenchmark(lowerCamelCase)
_lowercase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        """simple docstring"""
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
"""simple docstring"""
_lowercase : List[str] = 'sshleifer/tiny-gpt2'
_lowercase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase)
_lowercase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase, )
_lowercase : Optional[int] = PyTorchBenchmark(lowerCamelCase, configs=[config])
_lowercase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_encoder_decoder_with_configs(self):
"""simple docstring"""
_lowercase : Optional[int] = 'sshleifer/tinier_bart'
_lowercase : Any = AutoConfig.from_pretrained(lowerCamelCase)
_lowercase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase, )
_lowercase : int = PyTorchBenchmark(lowerCamelCase, configs=[config])
_lowercase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_with_configs(self):
"""simple docstring"""
_lowercase : Optional[Any] = 'sshleifer/tiny-gpt2'
_lowercase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase)
_lowercase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase, )
_lowercase : int = PyTorchBenchmark(lowerCamelCase, configs=[config])
_lowercase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
    def test_train_encoder_decoder_with_configs(self):
"""simple docstring"""
_lowercase : List[Any] = 'sshleifer/tinier_bart'
_lowercase : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase)
_lowercase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=lowerCamelCase, )
_lowercase : Optional[Any] = PyTorchBenchmark(lowerCamelCase, configs=[config])
_lowercase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
"""simple docstring"""
_lowercase : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_lowercase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, save_to_csv=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(lowerCamelCase, 'inf_time.csv'), train_memory_csv_file=os.path.join(lowerCamelCase, 'train_mem.csv'), inference_memory_csv_file=os.path.join(lowerCamelCase, 'inf_mem.csv'), train_time_csv_file=os.path.join(lowerCamelCase, 'train_time.csv'), env_info_csv_file=os.path.join(lowerCamelCase, 'env.csv'), multi_process=lowerCamelCase, )
_lowercase : Union[str, Any] = PyTorchBenchmark(lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase, 'inf_time.csv')).exists())
self.assertTrue(Path(os.path.join(lowerCamelCase, 'train_time.csv')).exists())
self.assertTrue(Path(os.path.join(lowerCamelCase, 'inf_mem.csv')).exists())
self.assertTrue(Path(os.path.join(lowerCamelCase, 'train_mem.csv')).exists())
self.assertTrue(Path(os.path.join(lowerCamelCase, 'env.csv')).exists())
    def test_trace_memory(self):
"""simple docstring"""
_lowercase : Optional[int] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowerCamelCase):
self.assertTrue(hasattr(lowerCamelCase, 'sequential'))
self.assertTrue(hasattr(lowerCamelCase, 'cumulative'))
self.assertTrue(hasattr(lowerCamelCase, 'current'))
self.assertTrue(hasattr(lowerCamelCase, 'total'))
with tempfile.TemporaryDirectory() as tmp_dir:
_lowercase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=lowerCamelCase, inference=lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(lowerCamelCase, 'log.txt'), log_print=lowerCamelCase, trace_memory_line_by_line=lowerCamelCase, multi_process=lowerCamelCase, )
_lowercase : Optional[Any] = PyTorchBenchmark(lowerCamelCase)
_lowercase : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(lowerCamelCase, 'log.txt')).exists())
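# Standalone sketch of the benchmarking pattern exercised above (argument values are
# assumptions for a quick local run, not part of the test suite):
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)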
| 21
|
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
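# For reference, get_swin_config("swin_tiny_patch4_window7_224") above parses the timm
# name into image_size=224, window_size=7, embed_dim=96, depths=(2, 2, 6, 2),
# num_heads=(3, 6, 12, 24).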
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 35
| 0
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size}, size={"shortest_edge": config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
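# Example CLI invocation (the checkpoint and output paths are illustrative):
#   python convert_original_tf_checkpoint_to_pytorch.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./mobilenet_v1_1.0_224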
| 22
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__a = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
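# The shim keeps legacy imports working; usage sketch (the checkpoint name is illustrative):
#   extractor = BeitFeatureExtractor.from_pretrained("microsoft/beit-base-patch16-224")
#   # emits a FutureWarning and otherwise behaves like BeitImageProcessor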
| 35
| 0
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase__: Dict = logging.getLogger()
def snake_case_ ( ) -> Dict:
UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCAmelCase : List[Any] = parser.parse_args()
return args.f
class DeeBertTests(TestCasePlus):
    """simple docstring"""

    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
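# These are slow tests; to run them locally (the file path is an illustrative assumption):
#   RUN_SLOW=1 pytest examples/research_projects/deebert/test_glue_deebert.py -sv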
| 23
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    """simple docstring"""

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )
    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
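# Usage sketch (the values below are assumptions, not defaults from the class):
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_max_length=128)
#   d = args.to_dict()  # GenerationConfig values, if any, are serialized via their own to_dict()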
| 35
| 0
|
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
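# Quick sanity check (hypothetical, not part of the original demo):
#   rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]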
| 24
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
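# Example CLI invocation (the script filename and output path are illustrative):
#   python convert_deit_timm_to_pytorch.py --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224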
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 35
| 0
|
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase__ : str = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
UpperCAmelCase__ : str = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
UpperCAmelCase__ : Optional[int] = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    """simple docstring"""

    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        """simple docstring"""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 25
|
'''simple docstring'''
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
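# Worked example (hypothetical two-document corpus, one document per line):
#   corpus = "this is the first document\nthis document is the second document"
#   document_frequency("document", corpus)  -> (2, 2): the term appears in both documents
#   inverse_document_frequency(2, 2)        -> 0.0, since log10(2 / 2) == 0
#   tf_idf(2, 0.0)                          -> 0.0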
| 35
| 0
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
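# Minimal usage sketch of this packaged loader (the data file path is illustrative):
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl", split="train")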
| 26
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase ( self : Optional[Any] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 35
| 0
|
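# --- Added example (not part of the original test file): a minimal sketch of
# end-user inference with TFResNet, mirroring the integration test above. The
# checkpoint name "microsoft/resnet-50" and the local image path are
# illustrative assumptions.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
image = Image.open("cat.png")  # hypothetical local image
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits
predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])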
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = "glpn"
def __init__( self : Optional[Any] , snake_case_ : List[str]=3 , snake_case_ : Dict=4 , snake_case_ : List[Any]=[2, 2, 2, 2] , snake_case_ : int=[8, 4, 2, 1] , snake_case_ : List[str]=[32, 64, 160, 256] , snake_case_ : Tuple=[7, 3, 3, 3] , snake_case_ : List[Any]=[4, 2, 2, 2] , snake_case_ : Tuple=[1, 2, 5, 8] , snake_case_ : List[str]=[4, 4, 4, 4] , snake_case_ : Optional[int]="gelu" , snake_case_ : Dict=0.0 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : List[Any]=0.02 , snake_case_ : Tuple=0.1 , snake_case_ : Any=1E-6 , snake_case_ : Dict=64 , snake_case_ : Tuple=10 , snake_case_ : List[Any]=-1 , **snake_case_ : Optional[Any] , ):
super().__init__(**snake_case_ )
snake_case__ : Optional[Any] = num_channels
snake_case__ : Dict = num_encoder_blocks
snake_case__ : Tuple = depths
snake_case__ : Union[str, Any] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : Optional[Any] = patch_sizes
snake_case__ : int = strides
snake_case__ : List[Any] = mlp_ratios
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : int = hidden_dropout_prob
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : str = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : Tuple = decoder_hidden_size
snake_case__ : List[Any] = max_depth
snake_case__ : Dict = head_in_index
| 35
| 0
|
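# --- Added example (assumption: the class above is exported as
# `transformers.GLPNConfig`): a minimal sketch of the usual config workflow,
# instantiating with overrides, tweaking a field, and round-tripping via dict.
from transformers import GLPNConfig

config = GLPNConfig(num_channels=3, decoder_hidden_size=64)
config.max_depth = 20  # override a default after construction
restored = GLPNConfig.from_dict(config.to_dict())  # e.g. what save/load does
assert restored.max_depth == 20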
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        labels = tokenizer('Hi I am', return_tensors='np').input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 28
|
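# --- Added example: a tiny, self-contained sketch of the loss used in the test
# above (softmax cross-entropy against one-hot targets). Shapes and values are
# made up for illustration.
import jax.numpy as jnp
import optax
from flax.training.common_utils import onehot

logits = jnp.array([[[2.0, 0.5, -1.0], [0.1, 1.2, 0.3]]])  # (batch=1, seq=2, vocab=3)
labels = jnp.array([[0, 1]])                               # (batch=1, seq=2)
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
print(float(loss))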
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__a = logging.get_logger(__name__)
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__a = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
__a = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
__a = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = RoFormerTokenizer
def __init__( self : List[Any] , snake_case_ : List[str]=None , snake_case_ : Dict=None , snake_case_ : Any=True , snake_case_ : str="[UNK]" , snake_case_ : List[str]="[SEP]" , snake_case_ : Optional[Any]="[PAD]" , snake_case_ : Union[str, Any]="[CLS]" , snake_case_ : Union[str, Any]="[MASK]" , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=None , **snake_case_ : Tuple , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
snake_case__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , snake_case_ ) != strip_accents
):
snake_case__ : str = getattr(snake_case_ , pre_tok_state.pop("""type""" ) )
snake_case__ : Optional[int] = do_lower_case
snake_case__ : Union[str, Any] = strip_accents
snake_case__ : Union[str, Any] = pre_tok_class(**snake_case_ )
snake_case__ : str = do_lower_case
def __getstate__( self : int ):
snake_case__ : List[Any] = self.__dict__.copy()
snake_case__ : str = BertPreTokenizer()
return state
def __setstate__( self : Dict , snake_case_ : Dict ):
snake_case__ : List[Any] = d
snake_case__ : Union[str, Any] = self.__dict__["""_tokenizer"""].get_vocab()
snake_case__ : List[Any] = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) )
def lowerCamelCase ( self : str , snake_case_ : Optional[Any] , snake_case_ : List[str]=None ):
snake_case__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase ( self : str , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
snake_case__ : int = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self : Dict , snake_case_ : str , snake_case_ : Optional[str] = None ):
snake_case__ : Union[str, Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def lowerCamelCase ( self : Dict , snake_case_ : List[str] , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ):
snake_case__ : Optional[Any] = BertPreTokenizer()
return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
| 35
| 0
|
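# --- Added example: a plain-Python sketch of the [CLS]/[SEP] layout that
# `build_inputs_with_special_tokens` and the token-type-id method above produce
# for a sentence pair. The ids 101/102 are the conventional BERT [CLS]/[SEP]
# ids and are assumptions here, not RoFormer's actual vocabulary.
cls_id, sep_id = 101, 102
token_ids_a = [7, 8, 9]
token_ids_b = [4, 5]
input_ids = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]
token_type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
print(input_ids)       # [101, 7, 8, 9, 102, 4, 5, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]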
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(F'{key}\n{value}\n')
| 29
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout(tmpdir) -> None:
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_b.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_lock_filename(tmpdir) -> None:
    filename = "a" * 1_000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    assert lock_a._lock_file.endswith(".lock")
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            lock_b.acquire(0)
| 35
| 0
|
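# --- Added example: a minimal sketch of the contention pattern the tests above
# exercise: two lock objects on the same lock file, where the second one times
# out while the first holds the lock. Creates "demo.lock" in the working dir.
from datasets.utils.filelock import FileLock, Timeout

lock_a = FileLock("demo.lock")
lock_b = FileLock("demo.lock")
with lock_a.acquire():
    try:
        lock_b.acquire(timeout=0.01)
    except Timeout:
        print("lock is held elsewhere; timed out as expected")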
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the most significant set bit (0 for 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
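# --- Added cross-check: the function above returns the 1-based position of the
# most significant set bit, which is exactly what the built-in
# int.bit_length() computes.
for number in (0, 1, 2, 17, 1 << 20):
    position, n = 0, number
    while n:
        position += 1
        n >>= 1
    assert position == number.bit_length()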
'''simple docstring'''
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series
    return total
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35
| 0
|
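# --- Added worked example: with first_term=1, common_diff=1, num_of_terms=10
# the formula S = n/2 * (2a + (n - 1)d) gives 10/2 * (2*1 + 9*1) = 5 * 11 = 55,
# which matches the direct sum 1 + 2 + ... + 10.
assert (10 / 2) * (2 * 1 + (10 - 1) * 1) == sum(range(1, 11)) == 55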
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Dict = MODEL_FOR_CAUSAL_LM_MAPPING
__UpperCamelCase: Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _A ( self : Tuple ):
_UpperCAmelCase : Union[str, Any] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
_UpperCAmelCase : Optional[int] = text_generator("This is a test" , do_sample=A )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
_UpperCAmelCase : Dict = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
A , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
_UpperCAmelCase : Any = text_generator("This is a test" , do_sample=A , num_return_sequences=2 , return_tensors=A )
self.assertEqual(
A , [
{"generated_token_ids": ANY(A )},
{"generated_token_ids": ANY(A )},
] , )
_UpperCAmelCase : Any = text_generator.model.config.eos_token_id
_UpperCAmelCase : List[Any] = "<pad>"
_UpperCAmelCase : Dict = text_generator(
["This is a test", "This is a second test"] , do_sample=A , num_return_sequences=2 , batch_size=2 , return_tensors=A , )
self.assertEqual(
A , [
[
{"generated_token_ids": ANY(A )},
{"generated_token_ids": ANY(A )},
],
[
{"generated_token_ids": ANY(A )},
{"generated_token_ids": ANY(A )},
],
] , )
@require_tf
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : int = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
_UpperCAmelCase : int = text_generator("This is a test" , do_sample=A )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
_UpperCAmelCase : List[str] = text_generator(["This is a test", "This is a second test"] , do_sample=A )
self.assertEqual(
A , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def _A ( self : Any , A : List[str] , A : List[str] , A : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = TextGenerationPipeline(model=A , tokenizer=A )
return text_generator, ["This is a test", "Another test"]
def _A ( self : Any ):
_UpperCAmelCase : int = "Hello I believe in"
_UpperCAmelCase : int = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase : Optional[int] = text_generator(A )
self.assertEqual(
A , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
_UpperCAmelCase : List[str] = text_generator(A , stop_sequence=" fe" )
self.assertEqual(A , [{"generated_text": "Hello I believe in fe"}] )
def _A ( self : Optional[int] , A : Optional[Any] , A : List[Any] ):
_UpperCAmelCase : List[str] = text_generator.model
_UpperCAmelCase : str = text_generator.tokenizer
_UpperCAmelCase : Dict = text_generator("This is a test" )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_UpperCAmelCase : Dict = text_generator("This is a test" , return_full_text=A )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_UpperCAmelCase : int = pipeline(task="text-generation" , model=A , tokenizer=A , return_full_text=A )
_UpperCAmelCase : Optional[Any] = text_generator("This is a test" )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_UpperCAmelCase : str = text_generator("This is a test" , return_full_text=A )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_UpperCAmelCase : Dict = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=A )
self.assertEqual(
A , [
[{"generated_text": ANY(A )}, {"generated_text": ANY(A )}],
[{"generated_text": ANY(A )}, {"generated_text": ANY(A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCAmelCase : Optional[Any] = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=A )
self.assertEqual(
A , [
[{"generated_text": ANY(A )}, {"generated_text": ANY(A )}],
[{"generated_text": ANY(A )}, {"generated_text": ANY(A )}],
] , )
with self.assertRaises(A ):
_UpperCAmelCase : List[str] = text_generator("test" , return_full_text=A , return_text=A )
with self.assertRaises(A ):
_UpperCAmelCase : Tuple = text_generator("test" , return_full_text=A , return_tensors=A )
with self.assertRaises(A ):
_UpperCAmelCase : List[str] = text_generator("test" , return_text=A , return_tensors=A )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCAmelCase : Any = text_generator("" )
self.assertEqual(A , [{"generated_text": ANY(A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCAmelCase : Dict = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
            # fancy calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCAmelCase : Any = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
_UpperCAmelCase : Any = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(A ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _A ( self : str ):
import torch
# Classic `model_kwargs`
_UpperCAmelCase : Any = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCAmelCase : List[str] = pipe("This is a test" )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
        # Upgraded those two to real pipeline arguments (they are just forwarded to the model, as they're unlikely to mean anything else.)
_UpperCAmelCase : Tuple = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCAmelCase : List[Any] = pipe("This is a test" )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCAmelCase : Tuple = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCAmelCase : Optional[Any] = pipe("This is a test" )
self.assertEqual(
A , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def _A ( self : Tuple ):
import torch
_UpperCAmelCase : Optional[int] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def _A ( self : Optional[Any] ):
import torch
_UpperCAmelCase : int = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=A , top_p=0.5 )
def _A ( self : str ):
_UpperCAmelCase : Any = "Hello world"
_UpperCAmelCase : Any = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
_UpperCAmelCase : Tuple = logging.get_logger("transformers.generation.tf_utils" )
else:
_UpperCAmelCase : Optional[Any] = logging.get_logger("transformers.generation.utils" )
_UpperCAmelCase : int = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(A ) as cl:
_UpperCAmelCase : int = text_generator(A , max_length=10 , max_new_tokens=1 )
self.assertIn(A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(A ) as cl:
_UpperCAmelCase : Tuple = text_generator(A , max_new_tokens=1 )
self.assertNotIn(A , cl.out )
with CaptureLogger(A ) as cl:
_UpperCAmelCase : List[str] = text_generator(A , max_length=10 )
self.assertNotIn(A , cl.out )
| 31
|
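# --- Added example: a minimal sketch of the text-generation pipeline exercised
# at length above, using the same tiny test checkpoint. Generated text is
# model-dependent, so only the output structure is checked.
from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
outputs = generator("Hello I believe in", do_sample=False, max_new_tokens=5)
assert isinstance(outputs[0]["generated_text"], str)
print(outputs[0]["generated_text"])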
'''simple docstring'''
__a = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset([])
__a = frozenset(["image"])
__a = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image"])
__a = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image", "mask_image"])
__a = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["example_image", "image", "mask_image"])
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
| 35
| 0
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
UpperCAmelCase_ : Dict = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
UpperCAmelCase_ : str = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
UpperCAmelCase_ : int = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : List[str] , __A : List[Any]=False , __A : Tuple=False , __A : Union[str, Any]=True , __A : List[str]=False , __A : Optional[int]="dummy_doc" ) -> Any:
"""simple docstring"""
a_ : Any = {doc: key_lines}
a_ : List[Any] = {doc: sys_lines}
a_ : Union[str, Any] = {}
a_ : int = 0
a_ : List[Any] = 0
a_ : Union[str, Any] = 0
a_ : Union[str, Any] = 0
a_ : int = 0
a_ : Optional[int] = 0
a_ , a_ : Optional[int] = reader.get_doc_mentions(__A , key_doc_lines[doc] , __A )
key_singletons_num += singletons_num
if NP_only or min_span:
a_ : List[Any] = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A )
a_ , a_ : List[str] = reader.get_doc_mentions(__A , sys_doc_lines[doc] , __A )
sys_singletons_num += singletons_num
if NP_only or min_span:
a_ : int = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A )
if remove_nested:
a_ , a_ : Union[str, Any] = reader.remove_nested_coref_mentions(__A , __A )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
a_ , a_ : Union[str, Any] = reader.remove_nested_coref_mentions(__A , __A )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
a_ : Any = reader.get_mention_assignments(__A , __A )
a_ : List[str] = reader.get_mention_assignments(__A , __A )
a_ : Dict = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : str , __A : str , __A : List[str] , __A : Any , __A : Optional[int] , __A : Dict ) -> Optional[int]:
"""simple docstring"""
a_ : Tuple = get_coref_infos(__A , __A , __A , __A , __A , __A )
a_ : Optional[int] = {}
a_ : Union[str, Any] = 0
a_ : str = 0
for name, metric in metrics:
a_ , a_ , a_ : Any = evaluator.evaluate_documents(__A , __A , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , F"""Recall: {recall * 1_00:.2f}""" , F""" Precision: {precision * 1_00:.2f}""" , F""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
a_ : List[Any] = (conll / 3) * 1_00
logger.info(F"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
a_ : Union[str, Any] = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
a_ : str = line.split()[5]
if not parse_col == "-":
a_ : Optional[int] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> List[str]:
a_ : str = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
a_ : Tuple = util.check_gold_parse_annotation(SCREAMING_SNAKE_CASE__ )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
a_ : List[str] = evaluate(
key_lines=SCREAMING_SNAKE_CASE__ , sys_lines=SCREAMING_SNAKE_CASE__ , metrics=SCREAMING_SNAKE_CASE__ , NP_only=SCREAMING_SNAKE_CASE__ , remove_nested=SCREAMING_SNAKE_CASE__ , keep_singletons=SCREAMING_SNAKE_CASE__ , min_span=SCREAMING_SNAKE_CASE__ , )
return score
| 32
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = GPTSanJapaneseTokenizer
lowercase = False
lowercase = {"do_clean_text": False, "add_prefix_space": False}
def lowerCamelCase ( self : str ):
super().setUp()
# fmt: off
snake_case__ : Optional[Any] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(snake_case_ ) )
def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase ( self : Any , snake_case_ : str ):
snake_case__ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def lowerCamelCase ( self : Any , snake_case_ : Dict ):
snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ )
snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
return text, ids
def lowerCamelCase ( self : Optional[Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Union[str, Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : List[str] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Dict ):
snake_case__ : Optional[Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : int = """こんにちは、世界。 こんばんは、㔺界。"""
snake_case__ : Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
snake_case__ : Dict = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids without special tokens
snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids with special tokens
snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token]
snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : Union[str, Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
snake_case__ : Optional[int] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
snake_case__ : Any = tokenizer.encode(snake_case_ )
snake_case__ : int = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Tuple = """こんにちは、世界。"""
snake_case__ : Optional[Any] = """こんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。こんばんは、世界。😀"""
snake_case__ : Dict = tokenizer.encode(prefix_text + input_text )
snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ )
snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ )
snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ )
snake_case__ : str = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Dict = """こんにちは、世界。"""
snake_case__ : Optional[int] = """こんばんは、㔺界。😀"""
snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1)
snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids
snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
snake_case__ : Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : Union[str, Any] = tokenizer.encode("""あンいワ""" )
snake_case__ : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
snake_case__ : Dict = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase ( self : Any ):
snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ )
snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ )
# fmt: off
snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , snake_case_ )
self.assertListEqual(x_token.token_type_ids , snake_case_ )
self.assertListEqual(x_token.attention_mask , snake_case_ )
self.assertListEqual(x_token_a.input_ids , snake_case_ )
self.assertListEqual(x_token_a.token_type_ids , snake_case_ )
self.assertListEqual(x_token_a.attention_mask , snake_case_ )
def lowerCamelCase ( self : Any ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase ( self : List[str] ):
# tokenizer has no padding token
pass
| 35
| 0
|
"""simple docstring"""
from functools import reduce
__A : str = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = __A) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33
|
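# --- Added sketch: an equivalent, more explicit sliding-window version of the
# reduce-based one-liner above, reusing the digit string __A defined there.
def solution_explicit(n: str = __A) -> int:
    best = 0
    for i in range(len(n) - 12):
        product = 1
        for digit in n[i : i + 13]:
            product *= int(digit)
        best = max(best, product)
    return best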
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = CustomTokenizer
pass
| 35
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0.0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__(self) -> str:
        return self.__str__()
    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
| 34
|
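# --- Added usage example for the Polynomial class above (coefficients are
# stored lowest degree first; __str__ prints highest degree first).
p = Polynomial(2, [3, 2, 1])  # 3 + 2x + x^2
q = Polynomial(1, [0, 1])     # x
print(p + q)                  # 1x^2 + 3x + 3
print(p * q)                  # 1x^3 + 2x^2 + 3x
assert p.evaluate(2) == 11    # 3 + 2*2 + 2**2
assert p.derivative() == Polynomial(1, [2, 2])  # d/dx (x^2 + 2x + 3) = 2x + 2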
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    """simple docstring"""
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}
    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35
| 0
|
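# --- Added usage sketch for the custom pipeline above. The tiny checkpoint
# name is an assumption for illustration; any sequence-classification model
# and its tokenizer will do.
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "hf-internal-testing/tiny-random-distilbert"  # assumed checkpoint
classifier = PairClassificationPipeline(
    model=AutoModelForSequenceClassification.from_pretrained(model_name),
    tokenizer=AutoTokenizer.from_pretrained(model_name),
)
result = classifier("I love this.", second_text="Me too!")
print(result["label"], result["score"])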
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n) -> None:
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()
def reverse_floyd(n) -> None:
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")
def pretty_print(n) -> None:
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R"| /\ | |- | |- |--| |\ /| |-")
print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 35
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = '''focalnet'''
def __init__( self ,__UpperCAmelCase=224 ,__UpperCAmelCase=4 ,__UpperCAmelCase=3 ,__UpperCAmelCase=96 ,__UpperCAmelCase=False ,__UpperCAmelCase=[192, 384, 768, 768] ,__UpperCAmelCase=[2, 2, 6, 2] ,__UpperCAmelCase=[2, 2, 2, 2] ,__UpperCAmelCase=[3, 3, 3, 3] ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=4.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=False ,__UpperCAmelCase=1E-4 ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=32 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Dict = image_size
lowerCAmelCase__ : int = patch_size
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : List[str] = use_conv_embed
lowerCAmelCase__ : List[Any] = hidden_sizes
lowerCAmelCase__ : Dict = depths
lowerCAmelCase__ : List[str] = focal_levels
lowerCAmelCase__ : List[str] = focal_windows
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Dict = mlp_ratio
lowerCAmelCase__ : Tuple = hidden_dropout_prob
lowerCAmelCase__ : Tuple = drop_path_rate
lowerCAmelCase__ : Dict = use_layerscale
lowerCAmelCase__ : Optional[Any] = layerscale_value
lowerCAmelCase__ : str = use_post_layernorm
lowerCAmelCase__ : Union[str, Any] = use_post_layernorm_in_modulation
lowerCAmelCase__ : int = normalize_modulator
lowerCAmelCase__ : Optional[Any] = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : List[Any] = encoder_stride
lowerCAmelCase__ : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Any = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase ,out_indices=__UpperCAmelCase ,stage_names=self.stage_names )
| 37
|
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"{solution() = }")
| 35
| 0
|
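# --- Added note: the same result follows in O(1) by inclusion-exclusion, since
# the sum of multiples of k below n is k * m * (m + 1) / 2 with m = (n - 1) // k.
def solution_closed_form(n: int = 1_000) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2
    return tri(3) + tri(5) - tri(15)
assert solution_closed_form() == solution() == 233_168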
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if `n` can be placed at (row, column) without a conflict."""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the first empty (zero) cell in row-major order, or None."""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve `grid` in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
return None
def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
for row in grid:
for cell in row:
            print(cell, end=" ")
print()
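

def _demo_sudoku_helpers() -> None:
    # A quick sanity sketch of the helpers on the unmodified `initial_grid`
    # (0-based indices; not part of the original file).
    assert find_empty_location(initial_grid) == (0, 1)  # first zero, scanning row-major
    assert not is_safe(initial_grid, 0, 1, 3)  # 3 already sits at (0, 0) in the same row and box
    assert is_safe(initial_grid, 0, 1, 1)  # 1 conflicts with nothing in row 0, column 1, or the box
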
if __name__ == "__main__":
    # NOTE: `sudoku` solves the example grids in place, so they are modified below.
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 38
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
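
# Minimal sketch of the lazy-import idea behind `_LazyModule` (illustrative only,
# not the actual transformers implementation): heavy submodules are imported the
# first time one of their attributes is accessed.
import importlib


class _LazyModuleSketch:
    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # Map each public name to the submodule defining it, e.g. "BloomModel" -> "modeling_bloom".
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name: str):
        submodule = importlib.import_module(f".{self._attr_to_module[name]}", self._package)
        return getattr(submodule, name)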
| 35
| 0
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints():
    """Raise if any config class docstring lacks a valid checkpoint link."""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
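

def _demo_checkpoint_regex() -> None:
    # A sketch (not part of the original script) of what `_re_checkpoint`
    # extracts from a config docstring: the display name and the hub URL.
    docstring = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
    assert _re_checkpoint.findall(docstring) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]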
| 39
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by `level` (-255.0 to 255.0)."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
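

def _demo_change_brightness() -> None:
    # A self-contained sketch (not part of the original file): uses a synthetic
    # 4x4 grayscale image instead of image_data/lena.jpg.
    gray = Image.new("L", (4, 4), color=100)
    brighter = change_brightness(gray, 50)
    assert brighter.getpixel((0, 0)) == 150  # 128 + 50 + (100 - 128)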
| 35
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the MaskFormer Swin backbone."""

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
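

# A usage sketch (illustrative, not part of the original file): the channel
# dimension doubles at each of the len(depths) - 1 downsampling steps, so
# embed_dim=96 with four stages yields hidden_size = 96 * 2**3 = 768.
def _demo_maskformer_swin_config() -> None:
    config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    assert config.hidden_size == 768
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]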
| 40
|
'''simple docstring'''
import argparse
import os
import re
__a = "src/transformers"
# Pattern that looks at the indentation in a line.
__a = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__a = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__a = re.compile(R"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Return the indentation of a line (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given indent level, between optional prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks


def ignore_underscore(key):
    """Wrap a key function so that sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort constants first, then classes, then functions, each alphabetically."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` blocks of one init file (optionally check only)."""
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run sort_imports on every __init__.py under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
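

def _demo_sort_objects() -> None:
    # A sketch of the ordering rule above (not part of the original script):
    # constants first, then classes, then functions, each group sorted
    # alphabetically while ignoring underscores and case.
    objs = ["load_tool", "BertConfig", "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AutoModel"]
    assert sort_objects(objs) == [
        "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoModel",
        "BertConfig",
        "load_tool",
    ]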
| 35
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
| 0
|
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale a pixel bounding box to the 0-1000 coordinate grid."""
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    """Apply Tesseract OCR on a document image; return words and normalized boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
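

def _demo_normalize_box() -> None:
    # A sketch (not part of the original file): pixel boxes are rescaled to the
    # 0-1000 coordinate grid that LayoutLM-style models expect.
    assert normalize_box([10, 20, 30, 40], 100, 200) == [100, 100, 300, 200]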
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 42
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
def make_linear_from_emb(emb):
    """Build an LM-head linear layer that shares its weights with `emb`."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
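

def _demo_make_linear_from_emb() -> None:
    # A sketch (not part of the original script): the LM head built by
    # make_linear_from_emb shares its weight matrix with the embedding table.
    emb = nn.Embedding(10, 4)
    lin = make_linear_from_emb(emb)
    assert lin.weight.data.equal(emb.weight.data)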
| 35
| 0
|
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
'''simple docstring'''
def choose_first(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 1:
__UpperCamelCase :str = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__UpperCamelCase :List[Any] = {k: [a[k]] for k in a}
if len(a['''start_token'''] ) > 0:
break
return a
__UpperCamelCase :Union[str, Any] = {'''id''': example['''id''']}
__UpperCamelCase :Optional[Any] = example['''annotations''']
__UpperCamelCase :Tuple = annotation['''yes_no_answer''']
if 0 in yes_no_answer or 1 in yes_no_answer:
__UpperCamelCase :List[Any] = ['''yes'''] if 1 in yes_no_answer else ['''no''']
__UpperCamelCase :Tuple = []
__UpperCamelCase :List[Any] = []
__UpperCamelCase :Tuple = ['''<cls>''']
else:
__UpperCamelCase :Optional[Any] = ['''short''']
__UpperCamelCase :Union[str, Any] = choose_first(annotation['''short_answers'''] )
if len(out['''start_token'''] ) == 0:
# answer will be long if short is not available
__UpperCamelCase :int = ['''long''']
__UpperCamelCase :List[Any] = choose_first(annotation['''long_answer'''] , is_long_answer=SCREAMING_SNAKE_CASE )
__UpperCamelCase :str = []
answer.update(SCREAMING_SNAKE_CASE )
# disregard some samples
if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
__UpperCamelCase :Any = True
else:
__UpperCamelCase :str = False
__UpperCamelCase :Tuple = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
if not all(isinstance(answer[k] , SCREAMING_SNAKE_CASE ) for k in cols ):
raise ValueError('''Issue in ID''' , example['''id'''] )
return answer
def get_context_and_ans(example, assertion=False):
'''simple docstring'''
__UpperCamelCase :Tuple = _get_single_answer(SCREAMING_SNAKE_CASE )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__UpperCamelCase :str = example['''document''']['''tokens''']
__UpperCamelCase :Any = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(SCREAMING_SNAKE_CASE ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__UpperCamelCase :Optional[int] = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__UpperCamelCase :Optional[Any] = example['''document''']['''tokens''']
__UpperCamelCase :List[Any] = answer['''start_token''']
__UpperCamelCase :Dict = answer['''end_token''']
__UpperCamelCase :Tuple = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__UpperCamelCase :Optional[int] = ''' '''.join(context[start_token:end_token] )
# checking above code
if assertion:
__UpperCamelCase :str = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
__UpperCamelCase :Union[str, Any] = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
__UpperCamelCase :Dict = ''' '''.join([old[i] for i in range(len(SCREAMING_SNAKE_CASE ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , SCREAMING_SNAKE_CASE , end='''\n''' )
print('''Old:''' , SCREAMING_SNAKE_CASE , end='''\n\n''' )
return {
"context": " ".join(SCREAMING_SNAKE_CASE ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2_048, max_length=4_096, assertion=True):
'''simple docstring'''
__UpperCamelCase :Optional[Any] = get_context_and_ans(SCREAMING_SNAKE_CASE , assertion=SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[str] = out['''answer''']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__UpperCamelCase :Optional[int] = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
__UpperCamelCase :Optional[int] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__UpperCamelCase :int = []
__UpperCamelCase :Any = []
__UpperCamelCase :Tuple = input_ids[:q_len]
__UpperCamelCase :Any = range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , max_length - doc_stride )
for i in doc_start_indices:
__UpperCamelCase :Tuple = i + max_length - q_len
__UpperCamelCase :Any = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(SCREAMING_SNAKE_CASE ),
"end_token": [-100] * len(SCREAMING_SNAKE_CASE ),
"category": category,
},
}
__UpperCamelCase :Tuple = out['''context'''].split()
__UpperCamelCase :Optional[Any] = splitted_context[answer['''end_token''']]
__UpperCamelCase :List[str] = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=SCREAMING_SNAKE_CASE , ).input_ids )
__UpperCamelCase :Dict = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=SCREAMING_SNAKE_CASE ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__UpperCamelCase :Any = len(tokenizer(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__UpperCamelCase :Optional[Any] = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
__UpperCamelCase :List[Any] = answer['''start_token''']
__UpperCamelCase :Optional[Any] = answer['''end_token''']
if assertion:
__UpperCamelCase :Optional[int] = tokenizer.decode(SCREAMING_SNAKE_CASE )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , SCREAMING_SNAKE_CASE , end='''\n\n''' )
if len(SCREAMING_SNAKE_CASE ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__UpperCamelCase :List[str] = input_ids[:q_len]
__UpperCamelCase :Optional[Any] = range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , max_length - doc_stride )
__UpperCamelCase :str = []
__UpperCamelCase :List[str] = []
__UpperCamelCase :str = []
__UpperCamelCase :List[Any] = [] # null, yes, no, long, short
for i in doc_start_indices:
__UpperCamelCase :str = i + max_length - q_len
__UpperCamelCase :str = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__UpperCamelCase :Tuple = start_token - i + q_len
__UpperCamelCase :List[str] = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
__UpperCamelCase :Any = -100
__UpperCamelCase :Union[str, Any] = -100
answers_category.append('''null''' )
__UpperCamelCase :str = inputs[-1][start_token : end_token + 1]
answers_start_token.append(SCREAMING_SNAKE_CASE )
answers_end_token.append(SCREAMING_SNAKE_CASE )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(SCREAMING_SNAKE_CASE ) )
print('''Old:''' , tokenizer.decode(SCREAMING_SNAKE_CASE ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2_048, max_length=4_096, assertion=False):
    """Map function: tokenize one example into strided inputs with labels."""
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    """Stream processed samples to a jsonlines file, skipping unusable ones."""
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]
    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
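

def _demo_stride_windows() -> None:
    # A sketch (not part of the original script) of the striding used above:
    # context windows start every (max_length - doc_stride) tokens after the
    # question, each holding (max_length - q_len) context tokens.
    q_len, max_length, doc_stride, n_tokens = 10, 100, 50, 300
    starts = list(range(q_len, n_tokens, max_length - doc_stride))
    assert starts[:3] == [10, 60, 110]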
| 43
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
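

def _demo_to_json() -> None:
    # A sketch (not part of the original file): classes serialize to their
    # names, containers recurse.
    assert to_json(int) == "int"
    assert to_json([int, "x", (str,)]) == ["int", "x", ["str"]]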
| 35
| 0
|
"""simple docstring"""
def solution(num: int = 1000000) -> int:
    """Return the start below `num` whose Collatz chain is the longest."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, num):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
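

def _demo_solution() -> None:
    # A sketch (not part of the original file): among starting values 2..13 the
    # longest Collatz chain starts at 9 (9→28→14→7→... has 20 terms under this
    # counting convention).
    assert solution(14) == 9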
| 44
|
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
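

# Illustration (added for clarity, not part of the original script): the qkv
# branch in convert_state_dict above just slices timm's fused projection
# matrix of shape (3*dim, dim) into query/key/value blocks. A minimal sketch
# with a made-up size:
#
#     dim = 4
#     fused_qkv = torch.randn(3 * dim, dim)
#     q = fused_qkv[:dim, :]
#     k = fused_qkv[dim : dim * 2, :]
#     v = fused_qkv[-dim:, :]
#     assert torch.equal(torch.cat([q, k, v], dim=0), fused_qkv)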
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 35
| 0
|
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 45
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__a = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 35
| 0
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(image: Image) -> Dict:
    npimg = np.array(image)
    shape = npimg.shape
    return {"hash": hashimage(image), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9_967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9_909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9_879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9_834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9_716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9_612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9_599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9_552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9_532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9_516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9_499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9_483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9_464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9_408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9_335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9_326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9_262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8_999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8_986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8_984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8_873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8_871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_053},
] , )
| 46
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
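

# Illustration (added for clarity, not part of the original module): the
# isinstance special-case in to_dict() exists because a GenerationConfig held
# in the `generation_config` field is not JSON-serializable as-is; flattening
# nested configs first makes the round trip work. Hypothetical usage:
#
#     args = Seq2SeqTrainingArguments(output_dir="out", generation_config=GenerationConfig(num_beams=4))
#     json.dumps(args.to_dict())  # ok once nested configs are plain dicts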
| 35
| 0
|
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """
    Multi-Level Feedback Queue scheduler: round robin on every queue except
    the last, which runs first-come-first-served.
    """

    def __init__(
        self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Returns the names of the processes in the finish queue, in order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Returns the waiting time of each given process."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Returns the turnaround time of each given process."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Returns the completion (stop) time of each given process."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """Returns the remaining burst time of each process in the queue."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """Adds the time the process has waited since it was last stopped."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])

        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
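
    # Illustration (added for comparison, not in the original demo): a pure
    # FCFS baseline for the same workload, i.e. a single-level MLFQ with no
    # round-robin slices. All arrivals are at t=0, so each waiting time equals
    # the sum of the burst times before it: [0, 53, 70, 138].
    B1, B2, B3, B4 = Process("P1", 0, 53), Process("P2", 0, 17), Process("P3", 0, 68), Process("P4", 0, 24)
    baseline = MLFQ(1, [], deque([B1, B2, B3, B4]), 0)
    baseline.multi_level_feedback_queue()
    print(f"FCFS-only waiting time:\t{baseline.calculate_waiting_time([B1, B2, B3, B4])}")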
| 47
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our DeiT structure.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
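

# Illustration (added for clarity, not part of the original script): the
# rename machinery above is ordered key rewriting on a plain dict. Toy sketch
# with made-up keys:
#
#     state = {"cls_token": some_tensor}
#     rename_key(state, "cls_token", "deit.embeddings.cls_token")
#     # -> the tensor is popped and re-inserted under the new key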
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 35
| 0
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=2 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=36 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=6 , UpperCamelCase__=6 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , UpperCamelCase__=1000 , ) -> str:
lowerCamelCase : int = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = num_channels
lowerCamelCase : Dict = image_size
lowerCamelCase : Union[str, Any] = patch_size
lowerCamelCase : Dict = is_training
lowerCamelCase : List[str] = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : Dict = use_labels
lowerCamelCase : Union[str, Any] = vocab_size
lowerCamelCase : List[Any] = hidden_size
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : int = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = max_position_embeddings
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : str = type_sequence_label_size
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Dict = coordinate_size
lowerCamelCase : Tuple = shape_size
lowerCamelCase : List[Any] = num_labels
lowerCamelCase : Tuple = num_choices
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase : Dict = text_seq_length
lowerCamelCase : List[Any] = (image_size // patch_size) ** 2 + 1
lowerCamelCase : Union[str, Any] = self.text_seq_length + self.image_seq_length
def _lowercase ( self ) -> int:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
lowerCamelCase : Optional[int] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase : Dict = bbox[i, j, 3]
lowerCamelCase : Optional[int] = bbox[i, j, 1]
lowerCamelCase : Any = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase : List[str] = bbox[i, j, 2]
lowerCamelCase : Optional[int] = bbox[i, j, 0]
lowerCamelCase : Tuple = tmp_coordinate
lowerCamelCase : Union[str, Any] = tf.constant(UpperCamelCase__ )
lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Optional[int] = None
if self.use_input_mask:
lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCamelCase : str = None
lowerCamelCase : Optional[int] = None
if self.use_labels:
lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCamelCase : Any = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
lowerCamelCase : Optional[int] = TFLayoutLMvaModel(config=UpperCamelCase__ )
# text + image
lowerCamelCase : Any = model(UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ )
lowerCamelCase : Optional[int] = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , training=UpperCamelCase__ , )
lowerCamelCase : Any = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase : Dict = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase : Union[str, Any] = model({"pixel_values": pixel_values} , training=UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
lowerCamelCase : str = self.num_labels
lowerCamelCase : Optional[Any] = TFLayoutLMvaForSequenceClassification(config=UpperCamelCase__ )
lowerCamelCase : str = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : int = self.num_labels
lowerCamelCase : str = TFLayoutLMvaForTokenClassification(config=UpperCamelCase__ )
lowerCamelCase : Tuple = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = 2
lowerCamelCase : Dict = TFLayoutLMvaForQuestionAnswering(config=UpperCamelCase__ )
lowerCamelCase : Tuple = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , training=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase_ : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCamelCase_ : int = False
lowerCamelCase_ : int = False
lowerCamelCase_ : str = False
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
return True
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> dict:
lowerCamelCase : Optional[Any] = copy.deepcopy(UpperCamelCase__ )
if model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : List[str] = {
k: tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Dict = TFLayoutLMvaModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = model_class(UpperCamelCase__ )
if getattr(UpperCamelCase__ , "hf_compute_loss" , UpperCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCamelCase__ )[0]
]
lowerCamelCase : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Dict = prepared_for_class.pop("input_ids" )
lowerCamelCase : Optional[int] = model(UpperCamelCase__ , **UpperCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Dict = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
lowerCamelCase : List[str] = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowerCamelCase : Optional[int] = -100
lowerCamelCase : Any = tf.convert_to_tensor(UpperCamelCase__ )
lowerCamelCase : Dict = model(UpperCamelCase__ , **UpperCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Optional[int] = model(UpperCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowerCamelCase : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ )
# Get keys that were added with the _prepare_for_class function
lowerCamelCase : Any = prepared_for_class.keys() - inputs_dict.keys()
lowerCamelCase : Any = inspect.signature(model.call ).parameters
lowerCamelCase : List[str] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowerCamelCase : Any = {0: "input_ids"}
for label_key in label_keys:
lowerCamelCase : Optional[Any] = signature_names.index(UpperCamelCase__ )
lowerCamelCase : int = label_key
lowerCamelCase : str = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowerCamelCase : List[str] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowerCamelCase : Optional[Any] = prepared_for_class[value]
lowerCamelCase : Any = tuple(UpperCamelCase__ )
# Send to model
lowerCamelCase : List[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
def _lowercase ( self ) -> Optional[int]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A ( ) -> int:
lowerCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> Union[str, Any]:
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None
@slow
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Union[str, Any] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Optional[Any] = prepare_img()
lowerCamelCase : Dict = image_processor(images=UpperCamelCase__ , return_tensors="tf" ).pixel_values
lowerCamelCase : Union[str, Any] = tf.constant([[1, 2]] )
lowerCamelCase : Dict = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
lowerCamelCase : List[Any] = model(input_ids=UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ )
# verify the logits
lowerCamelCase : Dict = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
lowerCamelCase : Dict = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 48
|
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")

    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
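

if __name__ == "__main__":
    # Illustration (added as an example, not part of the original module):
    # end-to-end tf-idf on a tiny made-up two-document corpus.
    example_corpus = "the cat sat\nthe dog sat"
    tf = term_frequency("cat", "the cat sat")  # 1 occurrence
    df, n = document_frequency("cat", example_corpus)  # (1 matching doc, 2 docs)
    idf = inverse_document_frequency(df, n)  # round(log10(2 / 1), 3) == 0.301
    print(tf_idf(tf, idf))  # 0.301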
| 35
| 0
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
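
    # Example invocation (illustrative; paths and the script file name are
    # placeholders, not taken from the original source):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./albert_model.ckpt \
    #       --albert_config_file ./albert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin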
| 49
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ):
snake_case__ : List[Any] = parent
snake_case__ : List[Any] = batch_size
snake_case__ : int = image_size
snake_case__ : List[Any] = num_channels
snake_case__ : Optional[Any] = embeddings_size
snake_case__ : Optional[int] = hidden_sizes
snake_case__ : Tuple = depths
snake_case__ : Any = is_training
snake_case__ : Optional[int] = use_labels
snake_case__ : Optional[int] = hidden_act
snake_case__ : Optional[int] = num_labels
snake_case__ : int = scope
snake_case__ : Tuple = len(snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : int ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ )
snake_case__ : int = model(snake_case_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ):
snake_case__ : str = self.num_labels
snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ )
snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Tuple = TFResNetModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowerCamelCase ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : str ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def lowerCamelCase ( self : int ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def lowerCamelCase ( self : List[Any] ):
pass
def lowerCamelCase ( self : List[Any] ):
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(snake_case_ )
snake_case__ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self : List[str] ):
def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ):
snake_case__ : List[Any] = model_class(snake_case_ )
snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ : Dict = layer_type
snake_case__ : Optional[int] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[Any] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase ( self : Optional[Any] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def __snake_case( ) -> Optional[int]:
snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase ( self : List[Any] ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : List[Any] = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[Any] = model(**snake_case_ )
# verify the logits
snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
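The slow integration test above follows a standard pattern: preprocess one fixture image, run a single forward pass, and compare a small slice of the logits against stored reference values. A minimal sketch of that check in isolation (the helper name and default tolerance are my additions, not part of the original file):
def _check_logits_slice(outputs, expected_slice, atol=1e-4):
    # compare the first few logits of the first batch element against
    # stored reference values, within a small absolute tolerance
    return np.allclose(outputs.logits[0, :3].numpy(), np.asarray(expected_slice), atol=atol)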
| 35
| 0
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    # Base case: one full path through the tree corresponds to one subsequence.
    if index == len(sequence):
        print(current_subsequence)
        return
    # Branch 1: skip the current element.
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include the current element, recurse, then backtrack.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
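For cross-checking the recursive enumeration above, here is a small non-recursive sketch (my addition; the helper name `all_subsequences_bitmask` is hypothetical): bit i of a mask decides whether sequence[i] is kept, so a length-n sequence yields exactly 2**n subsequences, including the empty one.
def all_subsequences_bitmask(sequence: list) -> list:
    n = len(sequence)
    # each mask in [0, 2**n) selects one subsequence
    return [[sequence[i] for i in range(n) if mask >> i & 1] for mask in range(2**n)]

assert len(all_subsequences_bitmask([3, 1, 2, 4])) == 2**4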
| 50
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
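As a hedged usage sketch (not part of the original file): any PretrainedConfig subclass like the one above can be round-tripped through a plain dictionary, which is what save_pretrained/from_pretrained rely on under the hood.
from transformers import GLPNConfig

config = GLPNConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2])
restored = GLPNConfig.from_dict(config.to_dict())  # JSON-compatible round trip
assert restored.num_encoder_blocks == config.num_encoder_blocks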
| 35
| 0
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Optional[int] = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
snake_case_ : Union[str, Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
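A hedged example of the equivalent conversion done directly from Python rather than through the argparse entry point above (the checkpoint name and output path are illustrative):
from transformers import BertTokenizerFast

fast_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")  # converts the slow vocab on the fly
fast_tokenizer.save_pretrained("./fast_tokenizers/bert-base-uncased", legacy_format=False)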
| 51
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__a = logging.get_logger(__name__)
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__a = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
__a = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
__a = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # restore a picklable/serializable pre-tokenizer before saving
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
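Why the __getstate__/__setstate__ pair above matters, as a short sketch (my addition): PreTokenizer.custom(...) wraps a Python object that the Rust tokenizer cannot pickle, so it is swapped for a plain BertPreTokenizer before pickling and rebuilt from the vocab afterwards.
import pickle

def pickle_roundtrip(tokenizer):
    # succeeds only because the hooks above remove the custom pre-tokenizer first
    return pickle.loads(pickle.dumps(tokenizer))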
| 35
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        '''simple docstring'''
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        '''simple docstring'''
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        '''simple docstring'''
        return self.config.num_train_timesteps
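The core of step_pred above is one Euler-Maruyama step of the reverse-time SDE. Isolated as a hedged sketch (the function name is mine; dt is negative because sampling runs backwards in time, hence sqrt(-dt)):
import math

import torch

def euler_maruyama_step(x, drift, diffusion, dt, generator=None):
    noise = torch.randn(x.shape, generator=generator, dtype=x.dtype)
    x_mean = x + drift * dt  # deterministic drift part
    return x_mean + diffusion * math.sqrt(-dt) * noise  # stochastic diffusion part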
| 52
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
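A minimal usage sketch of the lock under test (my addition; the path is illustrative): the context manager serializes access across processes, and a second acquire with timeout=0 fails fast with Timeout instead of blocking.
from datasets.utils.filelock import FileLock

with FileLock("/tmp/demo.lock"):
    pass  # critical section; concurrent holders would block or raise Timeout here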
| 35
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
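A quick numerical check of the class above (illustrative, not from the source): scale followed by unscale is the identity, since both use the same stored mean and std parameters.
import torch

normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(2, 768)
assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)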
| 53
|
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    # closed-form formula for the sum of an arithmetic series:
    # S = n / 2 * (2a + (n - 1) * d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
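Worked check of the closed form (my addition): with first_term=1, common_diff=1 and num_of_terms=10 the series is 1 + 2 + ... + 10, so the formula must give 55.
assert sum_of_series(1, 1, 10) == sum(range(1, 11)) == 55.0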
| 35
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
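An illustrative sketch of how such a dummy object behaves (my addition): instantiating it without keras_nlp installed raises an ImportError from requires_backends that tells the user which backend to install.
try:
    TFGPT2Tokenizer()
except ImportError as err:
    print(err)  # message explains that `keras_nlp` must be installed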
| 54
|
'''simple docstring'''
__a = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset([])
__a = frozenset(["image"])
__a = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image"])
__a = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image", "mask_image"])
__a = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["example_image", "image", "mask_image"])
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
| 35
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a_ : List[Any] = logging.get_logger(__name__)
a_ : Any = """▁"""
a_ : Dict = {"""vocab_file""": """sentencepiece.bpe.model"""}
a_ : Dict = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
a_ : str = {
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
a_ : List[str] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = ["input_ids", "attention_mask"]
_lowerCamelCase = []
_lowerCamelCase = []
def __init__( self , UpperCamelCase , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="</s>" , UpperCamelCase="<s>" , UpperCamelCase="<unk>" , UpperCamelCase="<pad>" , UpperCamelCase="<mask>" , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase = None , UpperCamelCase=None , UpperCamelCase=False , **UpperCamelCase , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase_ = legacy_behaviour
super().__init__(
bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , tokenizer_file=UpperCamelCase , src_lang=UpperCamelCase , tgt_lang=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase , **UpperCamelCase , )
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase ) )
lowerCamelCase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase_ = 1
lowerCamelCase_ = len(self.sp_model )
lowerCamelCase_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase )
}
lowerCamelCase_ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCamelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCamelCase_ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowerCamelCase_ = src_lang if src_lang is not None else "eng_Latn"
lowerCamelCase_ = self.lang_code_to_id[self._src_lang]
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
lowerCamelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
lowerCamelCase_ = [1] * len(self.prefix_tokens )
lowerCamelCase_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase )) + ([0] * len(UpperCamelCase )) + suffix_ones
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
lowerCamelCase_ = self.convert_tokens_to_ids(UpperCamelCase )
lowerCamelCase_ = tgt_lang_id
return inputs
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ = self.sp_model.PieceToId(UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = "".join(UpperCamelCase ).replace(UpperCamelCase , " " ).strip()
return out_string
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (out_vocab_file,)
def snake_case ( self , UpperCamelCase , UpperCamelCase = "eng_Latn" , UpperCamelCase = None , UpperCamelCase = "fra_Latn" , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase , UpperCamelCase , **UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase_ = [self.cur_lang_code]
lowerCamelCase_ = [self.eos_token_id]
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase_ = [self.cur_lang_code]
lowerCamelCase_ = [self.eos_token_id]
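A sketch of the special-token layout the source-language setter above produces in the default (non-legacy) mode, with all ids illustrative: the language code is prepended and the end-of-sequence token appended, so an encoded sentence looks like [lang_code] w1 ... wn [eos].
lang_code_id, eos_id = 256_047, 2  # hypothetical ids for "eng_Latn" and </s>
word_ids = [9_000, 9_001]          # hypothetical SentencePiece ids
model_input = [lang_code_id] + word_ids + [eos_id]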
| 55
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = GPTSanJapaneseTokenizer
lowercase = False
lowercase = {"do_clean_text": False, "add_prefix_space": False}
def lowerCamelCase ( self : str ):
super().setUp()
# fmt: off
snake_case__ : Optional[Any] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(snake_case_ ) )
def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase ( self : Any , snake_case_ : str ):
snake_case__ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def lowerCamelCase ( self : Any , snake_case_ : Dict ):
snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ )
snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
return text, ids
def lowerCamelCase ( self : Optional[Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Union[str, Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : List[str] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Dict ):
snake_case__ : Optional[Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : int = """こんにちは、世界。 こんばんは、㔺界。"""
snake_case__ : Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
snake_case__ : Dict = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids without special tokens
snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids with special tokens
snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token]
snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : Union[str, Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
snake_case__ : Optional[int] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
snake_case__ : Any = tokenizer.encode(snake_case_ )
snake_case__ : int = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Tuple = """こんにちは、世界。"""
snake_case__ : Optional[Any] = """こんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。こんばんは、世界。😀"""
snake_case__ : Dict = tokenizer.encode(prefix_text + input_text )
snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ )
snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ )
snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ )
snake_case__ : str = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Dict = """こんにちは、世界。"""
snake_case__ : Optional[int] = """こんばんは、㔺界。😀"""
snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1)
snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids
snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
snake_case__ : Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : Union[str, Any] = tokenizer.encode("""あンいワ""" )
snake_case__ : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
snake_case__ : Dict = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase ( self : Any ):
snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ )
snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ )
# fmt: off
snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , snake_case_ )
self.assertListEqual(x_token.token_type_ids , snake_case_ )
self.assertListEqual(x_token.attention_mask , snake_case_ )
self.assertListEqual(x_token_a.input_ids , snake_case_ )
self.assertListEqual(x_token_a.token_type_ids , snake_case_ )
self.assertListEqual(x_token_a.attention_mask , snake_case_ )
def lowerCamelCase ( self : Any ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase ( self : List[str] ):
# tokenizer has no padding token
pass
| 35
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( _lowerCamelCase ):
def __init__( self : Dict , lowercase_ : AutoencoderKL , lowercase_ : CLIPTextModel , lowercase_ : CLIPTokenizer , lowercase_ : UNetaDConditionModel , lowercase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowercase_ : StableDiffusionSafetyChecker , lowercase_ : CLIPImageProcessor , ):
super().__init__()
self.register_modules(
vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , )
def A_ ( self : Optional[Any] , lowercase_ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_ )
def A_ ( self : List[str] ):
self.enable_attention_slicing(lowercase_ )
@torch.no_grad()
def __call__( self : int , lowercase_ : Union[str, List[str]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 50 , lowercase_ : float = 7.5 , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , lowercase_ : Optional[torch.FloatTensor] = None , **lowercase_ : Optional[int] , ):
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = 1
elif isinstance(lowercase_ , lowercase_ ):
snake_case_ = len(lowercase_ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_ )}." )
# get prompt text embeddings
snake_case_ = self.tokenizer(
lowercase_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
snake_case_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
snake_case_ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
snake_case_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case_ ,snake_case_ ,snake_case_ = text_embeddings.shape
snake_case_ = text_embeddings.repeat(1 , lowercase_ , 1 )
snake_case_ = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case_ = 42
if negative_prompt is None:
snake_case_ = ['''''']
elif type(lowercase_ ) is not type(lowercase_ ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_ )} !="
F" {type(lowercase_ )}." )
elif isinstance(lowercase_ , lowercase_ ):
snake_case_ = [negative_prompt]
elif batch_size != len(lowercase_ ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_ )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
snake_case_ = negative_prompt
snake_case_ = text_input_ids.shape[-1]
snake_case_ = self.tokenizer(
lowercase_ , padding='''max_length''' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='''pt''' , )
snake_case_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ = uncond_embeddings.shape[1]
snake_case_ = uncond_embeddings.repeat(lowercase_ , lowercase_ , 1 )
snake_case_ = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
snake_case_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case_ = torch.randn(
lowercase_ , generator=lowercase_ , device='''cpu''' , dtype=lowercase_ ).to(self.device )
snake_case_ = torch.randn(lowercase_ , generator=lowercase_ , device='''cpu''' , dtype=lowercase_ ).to(
self.device )
else:
snake_case_ = torch.randn(
lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
snake_case_ = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
snake_case_ = latents_reference.to(self.device )
snake_case_ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
snake_case_ = (latents_shape[3] - latents_shape_reference[3]) // 2
snake_case_ = (latents_shape[2] - latents_shape_reference[2]) // 2
snake_case_ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
snake_case_ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
snake_case_ = 0 if dx < 0 else dx
snake_case_ = 0 if dy < 0 else dy
snake_case_ = max(-dx , 0 )
snake_case_ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
snake_case_ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowercase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case_ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case_ = {}
if accepts_eta:
snake_case_ = eta
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
snake_case_ = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case_ ,snake_case_ = noise_pred.chunk(2 )
snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_ )
snake_case_ = 1 / 0.1_8215 * latents
snake_case_ = self.vae.decode(lowercase_ ).sample
snake_case_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
snake_case_ = self.feature_extractor(self.numpy_to_pil(lowercase_ ) , return_tensors='''pt''' ).to(
self.device )
snake_case_ ,snake_case_ = self.safety_checker(
images=lowercase_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
snake_case_ = None
if output_type == "pil":
snake_case_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_ )
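The guidance step buried in the denoising loop above is worth isolating. A hedged standalone version (the function name is mine): classifier-free guidance combines the unconditional and text-conditioned noise predictions as e = e_uncond + w * (e_text - e_uncond), where w is guidance_scale.
import torch

def classifier_free_guidance(noise_uncond: torch.Tensor, noise_text: torch.Tensor, w: float) -> torch.Tensor:
    # w = 1.0 reduces to the conditional prediction; larger w pushes
    # the sample harder towards the text condition
    return noise_uncond + w * (noise_text - noise_uncond)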
| 56
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    slow_tokenizer_class = CustomTokenizer
    pass
| 35
| 0
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : Optional[Any] = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
A : str = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
A : str = "</w>"
A : Dict = "@@ "
def get_pairs(word):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
A : Any = {"facebook/s2t-wav2vec2-large-en-de": 1_0_2_4}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] =VOCAB_FILES_NAMES
__UpperCAmelCase : str =PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict =["""input_ids""", """attention_mask"""]
def __init__( self , __a , __a="<s>" , __a="<pad>" , __a="</s>" , __a="<unk>" , __a=False , __a=None , **__a , ):
super().__init__(
unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , do_lower_case=__a , **__a , )
__lowerCAmelCase = do_lower_case
with open(__a , encoding="utf-8" ) as vocab_handle:
__lowerCAmelCase = json.load(__a )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
__lowerCAmelCase = None
__lowerCAmelCase = None
else:
with open(__a , encoding="utf-8" ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split("\n" )[:-1]
__lowerCAmelCase = [tuple(merge.split()[:2] ) for merge in merges]
__lowerCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__lowerCAmelCase = {}
@property
def snake_case ( self ):
return len(self.decoder )
def snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case ( self , __a ):
__lowerCAmelCase = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
__lowerCAmelCase = get_pairs(__a )
if not pairs:
return token
while True:
__lowerCAmelCase = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase = bigram
__lowerCAmelCase = []
__lowerCAmelCase = 0
while i < len(__a ):
try:
__lowerCAmelCase = word.index(__a , __a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase = tuple(__a )
__lowerCAmelCase = new_word
if len(__a ) == 1:
break
else:
__lowerCAmelCase = get_pairs(__a )
__lowerCAmelCase = " ".join(__a )
if word == "\n " + BPE_TOKEN_MERGES:
__lowerCAmelCase = "\n" + BPE_TOKEN_MERGES
if word.endswith(__a ):
__lowerCAmelCase = word.replace(__a , "" )
__lowerCAmelCase = word.replace(" " , __a )
__lowerCAmelCase = word
return word
def snake_case ( self , __a ):
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
__lowerCAmelCase = text.lower()
__lowerCAmelCase = text.split()
__lowerCAmelCase = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__a ).split(" " ) ) )
return split_tokens
def snake_case ( self , __a ):
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def snake_case ( self , __a ):
__lowerCAmelCase = self.decoder.get(__a , self.unk_token )
return result
def snake_case ( self , __a ):
__lowerCAmelCase = " ".join(__a )
# make sure @@ tokens are concatenated
__lowerCAmelCase = "".join(string.split(__a ) )
return string
def snake_case ( self , __a , __a = None ):
if not os.path.isdir(__a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + "\n" )
__lowerCAmelCase = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__a , "w" , encoding="utf-8" ) as writer:
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
__lowerCAmelCase = token_index
writer.write(" ".join(__a ) + "\n" )
index += 1
return (vocab_file, merges_file)
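A toy walk-through of the greedy BPE loop above (the vocabulary and ranks are illustrative, not the real merges.txt): the lowest-ranked pair present in the word is merged first, and the loop repeats until no known pair remains.
bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}  # hypothetical merge table
word = ("l", "o", "w</w>")
# pass 1 merges ("l", "o")      -> ("lo", "w</w>")
# pass 2 merges ("lo", "w</w>") -> ("low</w>",)  and the loop stops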
| 57
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    # subtract the row-wise max first so np.exp never overflows
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35
| 0
|
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """
    Check the substring-divisibility property of Project Euler problem 43.
    """
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """
    Return the sum of all 0 to 9 pandigital numbers that are substring divisible.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
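# Sanity check with the example from the problem statement: 1406357289 is
# 0 to 9 pandigital and substring divisible (e.g. 357 is divisible by 7,
# 572 by 11, 728 by 13 and 289 by 17).
assert is_substring_divisible(tuple(int(d) for d in "1406357289"))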
| 58
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """
    Print the upper half of the diamond (pyramid) with n rows.
    """
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """
    Print the lower half of the diamond.
    """
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """
    Print the full diamond, or a friendly message for non-positive n.
    """
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R"| /\ | |- | |- |--| |\ /| |-")
    print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 35
| 0
|
def solution(power: int = 1000) -> int:
    """
    Return the sum of the digits of 2 ** power (Project Euler problem 16).
    """
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
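# Quick check against the statement's example: 2 ** 15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26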
| 59
|
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """
    Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1).
    """
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"{solution() = }")
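# The statement's example: below 10 the multiples of 3 or 5 are 3, 5, 6
# and 9, summing to 23.
assert solution(10) == 23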
| 35
| 0
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
try:
import torch # noqa: F401
except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
# keep track of unexpected & missing keys
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCAmelCase : Dict = flax_key_tuple[0] == pt_model.base_model_prefix
lowerCAmelCase : List[str] = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase : List[Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase : Optional[int] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_snake_case ) not in pt_model_dict:
# conv layer
lowerCAmelCase : str = flax_key_tuple[:-1] + ('''weight''',)
lowerCAmelCase : List[Any] = jnp.transpose(_snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_snake_case ) not in pt_model_dict:
# linear layer
lowerCAmelCase : int = flax_key_tuple[:-1] + ('''weight''',)
lowerCAmelCase : List[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCAmelCase : List[Any] = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCAmelCase : List[str] = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowerCAmelCase : List[Any] = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowerCAmelCase : Union[str, Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCAmelCase : List[Any] = '''.'''.join(_snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCAmelCase : Optional[int] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCAmelCase : str = key.split('''.''' )
lowerCAmelCase : str = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCAmelCase : Any = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCAmelCase : Optional[int] = key_components[-2] + '''_v'''
if name is not None:
lowerCAmelCase : Union[str, Any] = key_components[:-3] + [name]
lowerCAmelCase : List[str] = '''.'''.join(_snake_case )
lowerCAmelCase : List[str] = key
if flax_key in special_pt_names:
lowerCAmelCase : Optional[int] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCAmelCase : Union[str, Any] = np.asarray(_snake_case ) if not isinstance(_snake_case , np.ndarray ) else flax_tensor
lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case )
# remove from missing keys
missing_keys.remove(_snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_snake_case )
pt_model.load_state_dict(_snake_case )
# re-transform missing_keys to list
lowerCAmelCase : Optional[int] = list(_snake_case )
if len(_snake_case ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
    if len(missing_keys) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
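# A minimal round-trip sketch using the helpers above; the checkpoint name is
# illustrative, and any architecture with both PyTorch and Flax classes works.
from transformers import BertModel, FlaxBertModel

pt_model = BertModel.from_pretrained("bert-base-uncased")
pt_model.save_pretrained("/tmp/bert-pt")

# from_pt=True routes through load_pytorch_checkpoint_in_flax_state_dict
flax_model = FlaxBertModel.from_pretrained("/tmp/bert-pt", from_pt=True)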
| 60
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
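# A short sketch of what the lazy module above buys (assumes torch and
# tokenizers are installed): importing the package is cheap, and the heavy
# submodules load only on first attribute access.
import transformers.models.bloom as bloom  # fast, nothing heavy imported yet

config_cls = bloom.BloomConfig  # triggers the configuration_bloom import
model_cls = bloom.BloomForCausalLM  # triggers the modeling_bloom (torch) import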
| 35
| 0
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
_a = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, don't allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type"
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples"
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples"
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string."
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true"
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument("--print_predictions", action="store_true", help="If True, prints predictions while evaluating.")
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retrieved while generating.")

    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
_a = get_args()
main(args)
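# Hypothetical invocation sketch (script name, paths and checkpoint are
# placeholders, not part of the code above):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold.tsv \
#       --gold_data_mode qa \
#       --predictions_path predictions.txt \
#       --eval_mode e2e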
| 61
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """
        Fundamental transformation applied to every pixel value.
        """
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
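# Note: 128 + level + (c - 128) reduces to c + level, and Image.point clips
# the result to the valid 0-255 range for 8-bit bands, so an equivalent
# one-liner sketch is:
#
#   brighter = img.point(lambda c: c + 100)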
| 35
| 0
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_A = TypeVar('KEY')
_A = TypeVar('VAL')
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
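# A minimal usage sketch of the open-addressing map above:
hm = HashMap()
hm["a"] = 1
hm["b"] = 2
assert hm["a"] == 1 and len(hm) == 2
del hm["a"]  # replaces the slot with the _deleted sentinel
assert "a" not in hm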
| 62
|
'''simple docstring'''
import argparse
import os
import re
__a = "src/transformers"
# Pattern that looks at the indentation in a line.
__a = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__a = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__a = re.compile(R"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
snake_case__ : str = 0
snake_case__ : Union[str, Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCAmelCase ):
index += 1
snake_case__ : Tuple = ["""\n""".join(lines[:index] )]
else:
snake_case__ : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case__ : Optional[int] = [lines[index]]
index += 1
while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCAmelCase ) )
if index < len(_lowerCAmelCase ) - 1:
snake_case__ : str = [lines[index + 1]]
index += 1
else:
snake_case__ : int = []
else:
blocks.append("""\n""".join(_lowerCAmelCase ) )
snake_case__ : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCAmelCase ) > 0:
blocks.append("""\n""".join(_lowerCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCAmelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a `key` function to lowercase its result and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__a = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
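# A small illustration of the ordering rule sort_objects implements:
# constants first, then classes, then functions, underscores ignored when
# comparing. The object names below are invented for the example.
print(sort_objects(["load_model", "CONFIG_MAP", "BertModel", "_helper"]))
# -> ['CONFIG_MAP', 'BertModel', '_helper', 'load_model']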
| 35
| 0
|
'''simple docstring'''
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """
    Find the greatest product of thirteen adjacent digits in the 1000-digit
    number n (Project Euler problem 8).
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
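# Sanity check: the known answer for thirteen adjacent digits of this
# 1000-digit number is 23514624000.
assert solution() == 23514624000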
| 63
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
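# A sketch of what the dummy-object pattern above buys: in an environment
# without note_seq, instantiating the placeholder raises a helpful
# ImportError instead of a NameError at import time.
try:
    MidiProcessor()
except ImportError as err:
    print(err)  # tells the user to install the missing note_seq backend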
| 64
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__a = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
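# A minimal sketch (not part of the original script) of loading the exported
# checkpoint back for inference. The dump directory is a placeholder, and the
# standard transformers class names are used.
import torch
from transformers import SpeechEncoderDecoderModel, Speech2Text2Tokenizer, Wav2Vec2FeatureExtractor

dump_dir = "./wav2vec2_s2t2_dump"  # hypothetical --pytorch_dump_folder_path
model = SpeechEncoderDecoderModel.from_pretrained(dump_dir)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(dump_dir)
tokenizer = Speech2Text2Tokenizer.from_pretrained(dump_dir)
# one second of silence at 16 kHz stands in for real speech input
inputs = feature_extractor(torch.zeros(16_000).numpy(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    generated = model.generate(inputs.input_values)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))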
| 35
| 0
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
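# A sketch of how the guard above behaves at runtime (package layout assumed
# to mirror diffusers): when an optional backend is missing, the wildcard
# import supplies a dummy class whose constructor raises, so failures surface
# at use time rather than at import time.
from diffusers.utils import is_torchsde_available

if is_torchsde_available():
    from diffusers.schedulers import DPMSolverSDEScheduler
    scheduler = DPMSolverSDEScheduler()  # real scheduler, torch + torchsde present
else:
    print("torchsde not installed; DPMSolverSDEScheduler is only a dummy placeholder")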
| 65
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import work (when the Python process is running from the root of the repo)
sys.path.append(".")
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : Optional[int] = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"""`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
f"{test_file} instead." )
snake_case__ : Dict = components[-1]
if not test_fn.endswith("""py""" ):
raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead." )
if not test_fn.startswith("""test_modeling_""" ):
raise ValueError(
f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
snake_case__ : int = components[:-1] + [test_fn.replace(""".py""" , """""" )]
snake_case__ : int = """.""".join(_lowerCAmelCase )
return test_module_path
def __snake_case( _lowerCAmelCase ) -> List[str]:
snake_case__ : str = get_module_path(_lowerCAmelCase )
snake_case__ : Union[str, Any] = importlib.import_module(_lowerCAmelCase )
return test_module
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : List[Any] = []
snake_case__ : Optional[int] = get_test_module(_lowerCAmelCase )
for attr in dir(_lowerCAmelCase ):
if attr.endswith("""ModelTester""" ):
tester_classes.append(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
    # sort by class names
    return sorted(tester_classes , key=lambda x: x.__name__ )
def __snake_case( _lowerCAmelCase ) -> Dict:
snake_case__ : List[str] = []
snake_case__ : Any = get_test_module(_lowerCAmelCase )
for attr in dir(_lowerCAmelCase ):
snake_case__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
snake_case__ : List[str] = getattr(_lowerCAmelCase , """all_model_classes""" , [] )
if len(_lowerCAmelCase ) > 0:
test_classes.append(_lowerCAmelCase )
    # sort by class names
    return sorted(test_classes , key=lambda x: x.__name__ )
def __snake_case( _lowerCAmelCase ) -> Dict:
snake_case__ : Any = get_test_classes(_lowerCAmelCase )
snake_case__ : Optional[Any] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
    # sort by class names
    return sorted(model_classes , key=lambda x: x.__name__ )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
snake_case__ : Optional[int] = test_class()
if hasattr(_lowerCAmelCase , """setUp""" ):
test.setUp()
snake_case__ : Any = None
if hasattr(_lowerCAmelCase , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
snake_case__ : Tuple = test.model_tester.__class__
return model_tester
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
snake_case__ : Union[str, Any] = get_test_classes(_lowerCAmelCase )
snake_case__ : str = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_lowerCAmelCase )
    # sort by class names
    return sorted(target_test_classes , key=lambda x: x.__name__ )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
snake_case__ : Optional[Any] = get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase )
snake_case__ : Union[str, Any] = []
for test_class in test_classes:
snake_case__ : Tuple = get_model_tester_from_test_class(_lowerCAmelCase )
if tester_class is not None:
tester_classes.append(_lowerCAmelCase )
    # sort by class names
    return sorted(tester_classes , key=lambda x: x.__name__ )
def __snake_case( _lowerCAmelCase ) -> Union[str, Any]:
snake_case__ : Optional[Any] = get_test_classes(_lowerCAmelCase )
snake_case__ : Union[str, Any] = {test_class: get_model_tester_from_test_class(_lowerCAmelCase ) for test_class in test_classes}
return test_tester_mapping
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : Any = get_model_classes(_lowerCAmelCase )
snake_case__ : Any = {
model_class: get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes
}
return model_test_mapping
def __snake_case( _lowerCAmelCase ) -> Optional[int]:
snake_case__ : Union[str, Any] = get_model_classes(_lowerCAmelCase )
snake_case__ : str = {
model_class: get_tester_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes
}
return model_to_tester_mapping
def __snake_case( _lowerCAmelCase ) -> int:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return o
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return o.__name__
elif isinstance(_lowerCAmelCase , (list, tuple) ):
return [to_json(_lowerCAmelCase ) for x in o]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {to_json(_lowerCAmelCase ): to_json(_lowerCAmelCase ) for k, v in o.items()}
else:
return o
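# Hypothetical usage of the helpers above, assuming they keep their upstream
# names (get_module_path, get_test_classes, get_model_tester_from_test_class);
# the test file path is illustrative.
if __name__ == "__main__":
    path = "tests/models/bert/test_modeling_bert.py"
    print(get_module_path(path))  # -> "tests.models.bert.test_modeling_bert"
    for test_class in get_test_classes(path):
        print(test_class.__name__, "->", get_model_tester_from_test_class(test_class))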
| 35
| 0
|
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def A_ ( ):
'''simple docstring'''
snake_case_, snake_case_ :Tuple = 9, 14 # noqa: F841
snake_case_ :Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
snake_case_ :Any = defaultdict(_lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
snake_case_ :Union[str, Any] = mst(_lowercase )
snake_case_ :List[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
snake_case_ :Optional[Any] = tuple(answer[:2] )
snake_case_ :Dict = tuple(edge[::-1] )
assert edge in result or reverse in result
| 66
|
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __snake_case( _lowerCAmelCase ) -> List[Any]:
snake_case__ : Dict = SwinConfig()
snake_case__ : Optional[Any] = swin_name.split("""_""" )
snake_case__ : Any = name_split[1]
snake_case__ : List[Any] = int(name_split[4] )
snake_case__ : int = int(name_split[3][-1] )
if model_size == "tiny":
snake_case__ : List[Any] = 96
snake_case__ : int = (2, 2, 6, 2)
snake_case__ : int = (3, 6, 12, 24)
elif model_size == "small":
snake_case__ : Union[str, Any] = 96
snake_case__ : Optional[Any] = (2, 2, 18, 2)
snake_case__ : str = (3, 6, 12, 24)
elif model_size == "base":
snake_case__ : Dict = 128
snake_case__ : str = (2, 2, 18, 2)
snake_case__ : Dict = (4, 8, 16, 32)
else:
snake_case__ : List[str] = 192
snake_case__ : str = (2, 2, 18, 2)
snake_case__ : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
snake_case__ : str = 21_841
else:
snake_case__ : List[str] = 1_000
snake_case__ : int = """huggingface/label-files"""
snake_case__ : Any = """imagenet-1k-id2label.json"""
snake_case__ : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : Dict = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ : Optional[int] = idalabel
snake_case__ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case__ : List[Any] = img_size
snake_case__ : Dict = num_classes
snake_case__ : Dict = embed_dim
snake_case__ : Optional[int] = depths
snake_case__ : int = num_heads
snake_case__ : Optional[int] = window_size
return config
def __snake_case( _lowerCAmelCase ) -> Dict:
if "patch_embed.proj" in name:
snake_case__ : List[str] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case__ : int = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
snake_case__ : str = """encoder.""" + name
if "attn.proj" in name:
snake_case__ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case__ : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case__ : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case__ : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case__ : Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
snake_case__ : Tuple = """layernorm.weight"""
if name == "norm.bias":
snake_case__ : Union[str, Any] = """layernorm.bias"""
if "head" in name:
snake_case__ : Optional[int] = name.replace("""head""" , """classifier""" )
else:
snake_case__ : List[str] = """swin.""" + name
return name
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case__ : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
snake_case__ : Dict = key.split(""".""" )
snake_case__ : Optional[int] = int(key_split[1] )
snake_case__ : Union[str, Any] = int(key_split[3] )
snake_case__ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case__ : Optional[Any] = val[:dim, :]
snake_case__ : Tuple = val[
dim : dim * 2, :
]
snake_case__ : Dict = val[-dim:, :]
else:
snake_case__ : Tuple = val[
:dim
]
snake_case__ : int = val[
dim : dim * 2
]
snake_case__ : int = val[
-dim:
]
else:
snake_case__ : Union[str, Any] = val
return orig_state_dict
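# The fused-qkv split above can be sanity-checked in isolation; a toy sketch
# with illustrative dimensions (not taken from any real Swin checkpoint):
toy_dim = 4
toy_qkv = torch.arange(3 * toy_dim * toy_dim, dtype=torch.float32).reshape(3 * toy_dim, toy_dim)
toy_q, toy_k, toy_v = toy_qkv[:toy_dim, :], toy_qkv[toy_dim : toy_dim * 2, :], toy_qkv[-toy_dim:, :]
assert torch.equal(torch.cat([toy_q, toy_k, toy_v], dim=0), toy_qkv)  # the split is lossless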
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
snake_case__ : Optional[int] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
snake_case__ : Optional[int] = get_swin_config(_lowerCAmelCase )
snake_case__ : Optional[Any] = SwinForImageClassification(_lowerCAmelCase )
model.eval()
snake_case__ : str = convert_state_dict(timm_model.state_dict() , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
snake_case__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Dict = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
snake_case__ : Dict = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
snake_case__ : Optional[int] = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
snake_case__ : Optional[Any] = timm_model(inputs["""pixel_values"""] )
snake_case__ : str = model(**_lowerCAmelCase ).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print(f"Saving model {swin_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__a = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
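# A short sketch (the folder path is a placeholder assumption) showing that
# the saved artifacts reload with the standard transformers API:
def _reload_sketch(dump_dir="./swin_dump"):
    reloaded_model = SwinForImageClassification.from_pretrained(dump_dir)
    reloaded_processor = AutoImageProcessor.from_pretrained(dump_dir)
    return reloaded_model, reloaded_processor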
| 35
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
# Initialise PyTorch model
__lowerCamelCase = FunnelConfig.from_json_file(UpperCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__lowerCamelCase = FunnelBaseModel(UpperCamelCase__ ) if base_model else FunnelModel(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
__UpperCAmelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
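# A hypothetical invocation (file names are placeholders, not from the source):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path model.ckpt \
#       --config_file config.json \
#       --pytorch_dump_path pytorch_model.bin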
| 67
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__a = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : List[str] , *snake_case_ : str , **snake_case_ : List[str] ):
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
| 35
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase__ = {
    """google/realm-cc-news-pretrained-embedder""": 512,
    """google/realm-cc-news-pretrained-encoder""": 512,
    """google/realm-cc-news-pretrained-scorer""": 512,
    """google/realm-cc-news-pretrained-openqa""": 512,
    """google/realm-orqa-nq-openqa""": 512,
    """google/realm-orqa-nq-reader""": 512,
    """google/realm-orqa-wq-openqa""": 512,
    """google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase__ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = RealmTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> List[str]:
'''simple docstring'''
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
A__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowercase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase ) != tokenize_chinese_chars
):
A__ = getattr(lowercase , normalizer_state.pop("type" ) )
A__ = do_lower_case
A__ = strip_accents
A__ = tokenize_chinese_chars
A__ = normalizer_class(**lowercase )
A__ = do_lower_case
def UpperCamelCase ( self , lowercase , **lowercase ) -> Optional[int]:
'''simple docstring'''
A__ = PaddingStrategy.MAX_LENGTH
A__ = text
A__ = kwargs.pop("text_pair" , lowercase )
A__ = kwargs.pop("return_tensors" , lowercase )
A__ = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(lowercase ):
if batch_text_pair is not None:
A__ = batch_text_pair[idx]
else:
A__ = None
A__ = super().__call__(lowercase , lowercase , return_tensors=lowercase , **lowercase )
A__ = encoded_candidates.get("input_ids" )
A__ = encoded_candidates.get("attention_mask" )
A__ = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowercase )
A__ = {key: item for key, item in output_data.items() if len(lowercase ) != 0}
return BatchEncoding(lowercase , tensor_type=lowercase )
def UpperCamelCase ( self , lowercase , lowercase=None ) -> str:
'''simple docstring'''
A__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
'''simple docstring'''
A__ = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
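# A sketch of the candidate-batching __call__ defined above; upstream this
# method is exposed as batch_encode_candidates, and the checkpoint name and
# shapes here are illustrative.
from transformers import RealmTokenizerFast

tok = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
candidates = [["candidate one", "candidate two"]]  # one example, two candidate texts
enc = tok.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
print(enc.input_ids.shape)  # expected: (1, 2, 10)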
| 68
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = field(default=_a , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=_a , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=_a , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=_a , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=_a , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def lowerCamelCase ( self : List[str] ):
snake_case__ : int = super().to_dict()
for k, v in d.items():
if isinstance(snake_case_ , snake_case_ ):
snake_case__ : Optional[int] = v.to_dict()
return d
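# Illustrative use of these fields; the dataclass above mirrors transformers'
# Seq2SeqTrainingArguments, and the values below are examples only.
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",            # required by the base TrainingArguments
    predict_with_generate=True,  # compute ROUGE/BLEU via generate()
    generation_max_length=64,
    generation_num_beams=4,
)
print(args.to_dict()["generation_max_length"])  # -> 64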
| 35
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None:
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.', lowerCAmelCase__, )
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
| 69
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
snake_case__ : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : Tuple = """"""
else:
snake_case__ : Dict = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : Optional[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Tuple = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Any = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[int] = in_proj_bias[: config.hidden_size]
snake_case__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Tuple = in_proj_bias[-config.hidden_size :]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
snake_case__ : str = dct.pop(_lowerCAmelCase )
snake_case__ : Tuple = val
def __snake_case( ) -> Tuple:
snake_case__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str:
snake_case__ : Optional[int] = DeiTConfig()
# all deit models have fine-tuned heads
snake_case__ : Union[str, Any] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
snake_case__ : int = 1_000
snake_case__ : Any = """huggingface/label-files"""
snake_case__ : Optional[Any] = """imagenet-1k-id2label.json"""
snake_case__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : List[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ : List[Any] = idalabel
snake_case__ : List[str] = {v: k for k, v in idalabel.items()}
snake_case__ : Tuple = int(deit_name[-6:-4] )
snake_case__ : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
snake_case__ : Tuple = 192
snake_case__ : Union[str, Any] = 768
snake_case__ : Tuple = 12
snake_case__ : Union[str, Any] = 3
elif deit_name[9:].startswith("""small""" ):
snake_case__ : str = 384
snake_case__ : Any = 1_536
snake_case__ : str = 12
snake_case__ : int = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
snake_case__ : Union[str, Any] = 1_024
snake_case__ : Any = 4_096
snake_case__ : List[Any] = 24
snake_case__ : Tuple = 16
# load original model from timm
snake_case__ : List[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[Any] = timm_model.state_dict()
snake_case__ : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
snake_case__ : Optional[Any] = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
snake_case__ : List[Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
snake_case__ : Optional[Any] = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size )
snake_case__ : str = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case__ : Optional[Any] = encoding["""pixel_values"""]
snake_case__ : Tuple = model(_lowerCAmelCase )
snake_case__ : Optional[int] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 35
| 0
|
'''simple docstring'''
import sys
from collections import defaultdict
class UpperCAmelCase :
def __init__( self : int ) -> Optional[Any]:
_lowerCAmelCase = []
def lowercase__ ( self : List[str] , __snake_case : Dict ) -> List[Any]:
return self.node_position[vertex]
def lowercase__ ( self : List[Any] , __snake_case : int , __snake_case : Optional[Any] ) -> List[Any]:
_lowerCAmelCase = pos
def lowercase__ ( self : Optional[int] , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ) -> Dict:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_lowerCAmelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_lowerCAmelCase = 2 * start + 1
else:
_lowerCAmelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_lowerCAmelCase , _lowerCAmelCase = heap[smallest_child], positions[smallest_child]
_lowerCAmelCase , _lowerCAmelCase = (
heap[start],
positions[start],
)
_lowerCAmelCase , _lowerCAmelCase = temp, tempa
_lowerCAmelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __snake_case )
self.top_to_bottom(__snake_case , __snake_case , __snake_case , __snake_case )
def lowercase__ ( self : Dict , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Optional[int] ) -> Union[str, Any]:
_lowerCAmelCase = position[index]
while index != 0:
_lowerCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_lowerCAmelCase = heap[parent]
_lowerCAmelCase = position[parent]
self.set_position(position[parent] , __snake_case )
else:
_lowerCAmelCase = val
_lowerCAmelCase = temp
self.set_position(__snake_case , __snake_case )
break
_lowerCAmelCase = parent
else:
_lowerCAmelCase = val
_lowerCAmelCase = temp
self.set_position(__snake_case , 0 )
def lowercase__ ( self : Any , __snake_case : int , __snake_case : List[Any] ) -> Dict:
_lowerCAmelCase = len(__snake_case ) // 2 - 1
for i in range(__snake_case , -1 , -1 ):
self.top_to_bottom(__snake_case , __snake_case , len(__snake_case ) , __snake_case )
def lowercase__ ( self : List[Any] , __snake_case : Dict , __snake_case : Tuple ) -> Optional[Any]:
_lowerCAmelCase = positions[0]
_lowerCAmelCase = sys.maxsize
self.top_to_bottom(__snake_case , 0 , len(__snake_case ) , __snake_case )
return temp
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = Heap()
_lowerCAmelCase = [0] * len(lowerCAmelCase )
_lowerCAmelCase = [-1] * len(lowerCAmelCase ) # Neighboring Tree Vertex of selected vertex
    # Minimum distance of an explored vertex to a neighboring vertex of the
    # partially formed tree in the graph
_lowerCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex
_lowerCAmelCase = []
for vertex in range(len(lowerCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(lowerCAmelCase )
heap.node_position.append(lowerCAmelCase )
_lowerCAmelCase = []
_lowerCAmelCase = 1
_lowerCAmelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_lowerCAmelCase = 0
_lowerCAmelCase = distance
heap.heapify(lowerCAmelCase , lowerCAmelCase )
for _ in range(1 , len(lowerCAmelCase ) ):
_lowerCAmelCase = heap.delete_minimum(lowerCAmelCase , lowerCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_lowerCAmelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(lowerCAmelCase )]
):
_lowerCAmelCase = distance
heap.bottom_to_top(
lowerCAmelCase , heap.get_position(lowerCAmelCase ) , lowerCAmelCase , lowerCAmelCase )
_lowerCAmelCase = vertex
return tree_edges
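# A non-interactive sketch of the same routine; the name `prisms_algorithm`
# matches the call in the __main__ block below, and the triangle graph is a
# made-up example.
def _demo_triangle_graph() -> None:
    example = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        example[u].append([v, w])
        example[v].append([u, w])
    print(prisms_algorithm(example))  # expected MST edges: [(0, 1), (1, 2)]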
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Tuple =int(input('''Enter number of edges: ''').strip())
A__ : Dict =defaultdict(list)
for _ in range(edges_number):
A__ : Any =[int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 70
|
'''simple docstring'''
import string
from math import logaa
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
snake_case__ : List[str] = document.translate(
str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
snake_case__ : List[str] = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int, int]:
snake_case__ : Dict = corpus.lower().translate(
str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
snake_case__ : Any = corpus_without_punctuation.split("""\n""" )
snake_case__ : int = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_lowerCAmelCase ))
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> float:
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) , 3 )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> float:
return round(tf * idf , 3 )
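# A worked toy example, assuming the four helpers above keep their upstream
# names (term_frequency, document_frequency, inverse_document_frequency, tf_idf):
example_tf = term_frequency("cat", "the cat sat on the cat mat")             # -> 2
example_df, example_n = document_frequency("cat", "the cat\na dog\nmy cat")  # -> (2, 3)
example_idf = inverse_document_frequency(example_df, example_n)              # round(log10(3 / 2), 3) = 0.176
print(tf_idf(example_tf, example_idf))                                       # round(2 * 0.176, 3) = 0.352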
| 35
| 0
|
from manim import *
class __A ( a ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =Rectangle(height=0.5 , width=0.5 )
__UpperCamelCase : Dict =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCamelCase : Any =[mem.copy() for i in range(6 )]
__UpperCamelCase : Optional[int] =[mem.copy() for i in range(6 )]
__UpperCamelCase : Tuple =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : int =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : int =VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : str =Text('CPU' , font_size=24 )
__UpperCamelCase : Optional[int] =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
__UpperCamelCase : Dict =[mem.copy() for i in range(1 )]
__UpperCamelCase : Tuple =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : int =Text('GPU' , font_size=24 )
__UpperCamelCase : Dict =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.align_to(lowerCamelCase__ , lowerCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase__ )
__UpperCamelCase : List[Any] =[mem.copy() for i in range(6 )]
__UpperCamelCase : List[Any] =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : int =Text('Model' , font_size=24 )
__UpperCamelCase : List[str] =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , )
__UpperCamelCase : Union[str, Any] =MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
__UpperCamelCase : Union[str, Any] =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCamelCase : Union[str, Any] =MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=2.5 ) , Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.add(lowerCamelCase__ )
__UpperCamelCase : List[str] =[]
__UpperCamelCase : Tuple =[]
__UpperCamelCase : Union[str, Any] =[]
for i, rect in enumerate(lowerCamelCase__ ):
__UpperCamelCase : Any =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase__ )
cpu_target.generate_target()
__UpperCamelCase : Tuple =0.46 / 4
__UpperCamelCase : Optional[Any] =0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0 )
cpu_targs.append(lowerCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 71
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ):
snake_case__ : List[Any] = parent
snake_case__ : List[Any] = batch_size
snake_case__ : int = image_size
snake_case__ : List[Any] = num_channels
snake_case__ : Optional[Any] = embeddings_size
snake_case__ : Optional[int] = hidden_sizes
snake_case__ : Tuple = depths
snake_case__ : Any = is_training
snake_case__ : Optional[int] = use_labels
snake_case__ : Optional[int] = hidden_act
snake_case__ : Optional[int] = num_labels
snake_case__ : int = scope
snake_case__ : Tuple = len(snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : int ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ )
snake_case__ : int = model(snake_case_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ):
snake_case__ : str = self.num_labels
snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ )
snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : Tuple ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs
snake_case__ : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Tuple = TFResNetModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowerCamelCase ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : str ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def lowerCamelCase ( self : int ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def lowerCamelCase ( self : List[Any] ):
pass
def lowerCamelCase ( self : List[Any] ):
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(snake_case_ )
snake_case__ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self : List[str] ):
def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ):
snake_case__ : List[Any] = model_class(snake_case_ )
snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ : Dict = layer_type
snake_case__ : Optional[int] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[Any] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase ( self : Optional[Any] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def __snake_case( ) -> Optional[int]:
snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase ( self : List[Any] ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : List[Any] = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[Any] = model(**snake_case_ )
# verify the logits
snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
| 35
| 0
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Any = 3
        _lowerCamelCase : str = 250
_lowerCamelCase : Optional[Any] = ids_tensor((batch_size, length) , __lowerCAmelCase )
_lowerCamelCase : List[str] = torch.ones((batch_size, length) , device=__lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = self._get_tensors(5 )
_lowerCamelCase : str = StoppingCriteriaList(
[
                MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_lowerCamelCase , _lowerCamelCase : List[Any] = self._get_tensors(9 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
        _lowerCamelCase , _lowerCamelCase : Any = self._get_tensors(10 )
self.assertTrue(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
        _lowerCamelCase : Optional[Any] = MaxLengthCriteria(max_length=10 )
_lowerCamelCase , _lowerCamelCase : Optional[int] = self._get_tensors(5 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_lowerCamelCase , _lowerCamelCase : Any = self._get_tensors(9 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
        _lowerCamelCase , _lowerCamelCase : int = self._get_tensors(10 )
self.assertTrue(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_lowerCamelCase , _lowerCamelCase : Tuple = self._get_tensors(5 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_lowerCamelCase , _lowerCamelCase : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
        _lowerCamelCase , _lowerCamelCase : Any = self._get_tensors(10 )
self.assertTrue(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_lowerCamelCase : str = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self._get_tensors(5 )
_lowerCamelCase : List[str] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
_lowerCamelCase : Dict = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(__lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(__lowerCAmelCase ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        _lowerCamelCase : Dict = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
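# A minimal direct-use sketch of the criteria exercised above; depending on
# the transformers version the call returns a plain bool or a per-sequence
# tensor of flags.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
probe_ids = torch.zeros((2, 10), dtype=torch.long)
probe_scores = torch.zeros((2, 10))
print(criteria(probe_ids, probe_scores))  # truthy once length 10 is reached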
| 72
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = "glpn"
def __init__( self : Optional[Any] , snake_case_ : List[str]=3 , snake_case_ : Dict=4 , snake_case_ : List[Any]=[2, 2, 2, 2] , snake_case_ : int=[8, 4, 2, 1] , snake_case_ : List[str]=[32, 64, 160, 256] , snake_case_ : Tuple=[7, 3, 3, 3] , snake_case_ : List[Any]=[4, 2, 2, 2] , snake_case_ : Tuple=[1, 2, 5, 8] , snake_case_ : List[str]=[4, 4, 4, 4] , snake_case_ : Optional[int]="gelu" , snake_case_ : Dict=0.0 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : List[Any]=0.02 , snake_case_ : Tuple=0.1 , snake_case_ : Any=1E-6 , snake_case_ : Dict=64 , snake_case_ : Tuple=10 , snake_case_ : List[Any]=-1 , **snake_case_ : Optional[Any] , ):
super().__init__(**snake_case_ )
snake_case__ : Optional[Any] = num_channels
snake_case__ : Dict = num_encoder_blocks
snake_case__ : Tuple = depths
snake_case__ : Union[str, Any] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : Optional[Any] = patch_sizes
snake_case__ : int = strides
snake_case__ : List[Any] = mlp_ratios
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : int = hidden_dropout_prob
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : str = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : Tuple = decoder_hidden_size
snake_case__ : List[Any] = max_depth
snake_case__ : Dict = head_in_index
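# Illustrative instantiation; given the "glpn" model type above, this class
# corresponds to transformers' GLPNConfig.
from transformers import GLPNConfig

default_config = GLPNConfig()
print(default_config.decoder_hidden_size)  # -> 64, per the defaults above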
| 35
| 0
|
from collections import namedtuple
a =namedtuple("""from_to""", """from_ to""")
a ={
    """cubicmeter""": from_to(1, 1),
    """litre""": from_to(0.001, 1000),
    """kilolitre""": from_to(1, 1),
    """gallon""": from_to(0.00454, 264.172),
    """cubicyard""": from_to(0.76455, 1.30795),
    """cubicfoot""": from_to(0.028, 35.3147),
    """cup""": from_to(0.000236588, 4226.75),
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
+ ', '.join(lowerCamelCase__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ ', '.join(lowerCamelCase__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
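A quick worked example for volume_conversion above: converting litres to gallons multiplies by the litre from_ factor (0.001 cubic metres per litre) and then by the gallon to factor (264.172 gallons per cubic metre).

print(volume_conversion(500, "litre", "gallon"))  # 500 * 0.001 * 264.172, about 132.086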
| 73
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
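A short usage sketch (an assumption, not part of the original file): the fast tokenizer loads like any other transformers tokenizer, and the jieba-based pre-tokenizer installed in __init__ handles the Chinese word segmentation.

from transformers import RoFormerTokenizerFast

tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
encoding = tokenizer("今天天气非常好。", return_tensors="pt")
print(encoding.input_ids.shape)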
| 35
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    # replace the last `occurrence` matches of `old` in `s`
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
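Example invocation of the script above (the script file name and all paths are illustrative assumptions, not taken from the original):

# python convert_dalle_to_flava_codebook.py \
#     --checkpoint_path ./encoder.pkl \
#     --pytorch_dump_folder_path ./flava-image-codebook \
#     --config_path ./config.json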
| 74
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
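A minimal illustration of the behaviour the tests above rely on (a sketch, assuming the datasets package is installed): a second FileLock on the same path cannot be acquired while the first one is held.

from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("demo.lock")
with lock.acquire():
    try:
        FileLock("demo.lock").acquire(timeout=0.01)
    except Timeout:
        print("lock is held, as expected")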
| 35
| 0
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 75
|
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 35
| 0
|
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick string-matching automaton over a list of keywords."""

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
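A brief usage example for the automaton above (names follow the cleaned-up class): the trie and failure links are built once in the constructor, after which search_in scans the text in a single pass and reports every match position.

automaton = Automaton(["what", "hat", "ver", "er"])
print(automaton.search_in("whatever, err ... , wherever"))
# {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}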
| 76
|
'''simple docstring'''
TEXT_TO_IMAGE_PARAMS = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset([])
__a = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["example_image", "image", "mask_image"])
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
| 35
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        image = np.ones([3, 224, 224])

        image_dict = image_processor(image, return_tensors="np")
        input_processor = processor(images=image, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        image = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=image)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 77
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 35
| 0
|
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
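Example output for the generators above (names follow the cleaned-up version); both implementations agree row for row:

print(generate_pascal_triangle(4))
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
print(generate_pascal_triangle_optimized(4))
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]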
| 78
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 35
| 0
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V share one GEMM."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of the given quantizer submodule of mod."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
| 79
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35
| 0
|
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
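Sanity check for the closed form above: a 2 x 2 grid has C(4, 2) = 4! / (2! * 2!) = 6 lattice paths, and the default 20 x 20 grid follows the same formula.

print(solution(2))   # 6
print(solution(20))  # 137846528820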
| 80
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 35
| 0
|