Dataset schema (one row per example; the code samples below are the cell contents):

| column | type | values |
|---|---|---|
| code | string | lengths 81–54k |
| code_codestyle | int64 | 0–721 |
| style_context | string | lengths 91–41.9k |
| style_context_codestyle | int64 | 0–699 |
| label | int64 | 0–1 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """
        Returns the image with corners identified and the list of corner positions.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
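For intuition, the response computed per window above is R = det(M) - k * trace(M)^2, where M is the summed gradient covariance of the window. A minimal standalone sketch on a toy window (array values illustrative, variable names hypothetical):

import numpy as np

patch = np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])  # toy window
dy, dx = np.gradient(patch)
wxx, wyy, wxy = (dx**2).sum(), (dy**2).sum(), (dx * dy).sum()
r = (wxx * wyy - wxy**2) - 0.04 * (wxx + wyy) ** 2
print(r)  # negative here: a pure vertical edge, not a corner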
[code_codestyle: 642]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[style_context_codestyle: 642, label: 1]
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = "\\n\n"

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
        NOTE: Perplexity can only be calculated for causal language models.
        This includes models such as gpt2, causal variations of bert,
        causal versions of t5, and more (the full list can be found
        in the AutoModelForCausalLM documentation here:
        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, model_id, input_texts, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
[code_codestyle: 642]
# Imports
import numpy as np


class IndexCalculation:
    """
    Calculates vegetation indices from the red, green, blue, red edge and
    near-infrared (nir) bands of an image.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
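A short usage sketch of the class above with small synthetic bands (the array values are illustrative only):

import numpy as np

red = np.array([[50.0, 60.0], [70.0, 80.0]])
nir = np.array([[200.0, 210.0], [220.0, 230.0]])
cl = IndexCalculation(red=red, nir=nir)
print(cl.calculation("NDVI"))  # equivalent to cl.ndvi(): (nir - red) / (nir + red)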
[style_context_codestyle: 642, label: 1]
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
[code_codestyle: 642]
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
[style_context_codestyle: 642, label: 1]
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
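The last helpers implement the standard DDPM forward-noising identity x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. A minimal standalone sketch of that identity in jax.numpy (toy values, hypothetical shapes):

import jax.numpy as jnp

alphas_cumprod = jnp.array([0.99, 0.95, 0.90])  # toy cumulative alphas
x0 = jnp.ones((2, 4))                           # hypothetical clean samples
noise = jnp.zeros((2, 4))                       # hypothetical noise
t = jnp.array([0, 2])
sqrt_ap = alphas_cumprod[t] ** 0.5
noisy = sqrt_ap[:, None] * x0 + ((1 - alphas_cumprod[t]) ** 0.5)[:, None] * noise
print(noisy.shape)  # (2, 4)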
[code_codestyle: 642]
import numpy as np

from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch

from .test_feature_extraction_common import FeatureExtractionSavingTestMixin


class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))

    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
[style_context_codestyle: 642, label: 1]
import math


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
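A quick round trip with the two functions above (the key and message are arbitrary examples):

ciphertext = encrypt_message(6, "Harshil Darji")
print(ciphertext)                      # Hlia rDsahrij
print(decrypt_message(6, ciphertext))  # Harshil Darji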
[code_codestyle: 642]
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """
    Mobius function: 0 if n is not square free, otherwise (-1)**k
    where k is the number of prime factors of n.
    """
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
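A few sample values of the function above (runnable where the `maths` helper modules imported above are available):

for n in (4, 10, 30):
    print(n, mobius(n))  # 4 -> 0 (2*2), 10 -> 1 (2*5), 30 -> -1 (2*3*5)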
[style_context_codestyle: 642, label: 1]
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
[code_codestyle: 642]
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
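# --- Added usage sketch (assumes this class corresponds to `FocalNetConfig`):
# instantiating it exposes the stage names and aligned backbone outputs derived
# in `__init__` above, e.g.
#     config = FocalNetConfig(out_features=['stage1', 'stage3'])
#     config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     config.out_features  # ['stage1', 'stage3']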
def _lowerCAmelCase ( A__ ):
    if not isinstance(A__ , int ) or A__ < 0:
        raise ValueError('Input must be a non-negative integer' )
    count = 0
    while A__:
        # Clearing the lowest set bit jumps straight to the next 1, so the
        # loop runs once per set bit instead of once per bit position
        A__ &= A__ - 1
        count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
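# --- Added example: Kernighan's trick above clears the lowest set bit each
# iteration, so 25 == 0b11001 takes exactly three loop turns.
assert _lowerCAmelCase(25) == 3
assert _lowerCAmelCase(0) == 0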
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def _lowerCAmelCase ( A__ ):
    pairs = set()
    prev_char = A__[0]
    for char in A__[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
return pairs
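# --- Added example: the helper above (called `get_pairs` inside the tokenizer
# below) yields the adjacent symbol pairs that BPE ranks as merge candidates.
assert _lowerCAmelCase(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}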
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase)
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
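# --- Added usage sketch (assumes this class corresponds to
# `BlenderbotSmallTokenizer` from transformers; the calls are illustrative):
#     tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M')
#     tok.tokenize('Sam likes tokenizers!')  # lowercased, BPE-split '@@' pieces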
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
a__ : Optional[int] = True
except ImportError:
a__ : List[Any] = False
a__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCAmelCase ( A__ ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : ArgumentParser) -> List[str]:
"""simple docstring"""
lowercase__ = parser.add_parser('add-new-model')
add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.')
add_new_model_parser.add_argument('--testing_file' , type=lowerCAmelCase , help='Configuration file on which to run.')
add_new_model_parser.add_argument(
'--path' , type=lowerCAmelCase , help='Path to cookiecutter. Should only be used for testing purposes.')
add_new_model_parser.set_defaults(func=lowerCAmelCase)
def __init__( self : Union[str, Any] , lowerCAmelCase : bool , lowerCAmelCase : str , lowerCAmelCase : List[Any]=None , *lowerCAmelCase : int) -> int:
"""simple docstring"""
lowercase__ = testing
lowercase__ = testing_file
lowercase__ = path
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.')
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n')
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowercase__ = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(lowerCAmelCase) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.')
lowercase__ = (
Path(lowerCAmelCase).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
)
lowercase__ = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCAmelCase))
else:
with open(self._testing_file , 'r') as configuration_file:
lowercase__ = json.load(lowerCAmelCase)
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path) , no_input=lowerCAmelCase , extra_context=lowerCAmelCase , )
lowercase__ = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r') as configuration_file:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = configuration['lowercase_modelname']
lowercase__ = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f'''{directory}/configuration.json''')
lowercase__ = 'PyTorch' in generate_tensorflow_pytorch_and_flax
lowercase__ = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
lowercase__ = 'Flax' in generate_tensorflow_pytorch_and_flax
lowercase__ = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase)
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=lowerCAmelCase)
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , 'w'):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(lowerCAmelCase : Optional[int]):
with open(lowerCAmelCase , 'r') as f:
lowercase__ = f.readlines()
with open(lowerCAmelCase , 'w') as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCAmelCase)
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''')
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''')
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''')
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''')
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''')
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''')
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''')
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''')
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''')
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : List[str]):
# Create temp file
lowercase__, lowercase__ = mkstemp()
lowercase__ = False
with fdopen(lowerCAmelCase , 'w') as new_file:
with open(lowerCAmelCase) as old_file:
for line in old_file:
new_file.write(lowerCAmelCase)
if line_to_copy_below in line:
lowercase__ = True
for line_to_copy in lines_to_copy:
new_file.write(lowerCAmelCase)
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''')
# Copy the file permissions from the old file to the new file
copymode(lowerCAmelCase , lowerCAmelCase)
# Remove original file
remove(lowerCAmelCase)
# Move new file
move(lowerCAmelCase , lowerCAmelCase)
def skip_units(lowerCAmelCase : Tuple):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowerCAmelCase : Any):
with open(lowerCAmelCase) as datafile:
lowercase__ = []
lowercase__ = False
lowercase__ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowercase__ = line.split('"')[1]
lowercase__ = skip_units(lowerCAmelCase)
elif "# Below: " in line and "##" not in line:
lowercase__ = line.split('"')[1]
lowercase__ = skip_units(lowerCAmelCase)
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = []
elif "# Replace with" in line and "##" not in line:
lowercase__ = []
elif "##" not in line:
lines_to_copy.append(lowerCAmelCase)
remove(lowerCAmelCase)
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''')
os.rmdir(lowerCAmelCase)
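# --- Added usage note: the (deprecated) command registered above is invoked
# from a transformers checkout roughly as follows (flags per the parser above):
#     transformers-cli add-new-model                        # interactive run
#     transformers-cli add-new-model --testing --testing_file config.json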
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
a__ : Union[str, Any] = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
a__ : List[str] = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def _lowerCAmelCase ( A__ , A__=False ):
lowercase__, lowercase__ = create_model(
'HTSAT-tiny' , 'roberta' , A__ , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=A__ , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def _lowerCAmelCase ( A__ ):
lowercase__ = {}
lowercase__ = r'.*sequential.(\d+).*'
lowercase__ = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowercase__ = key.replace(A__ , A__ )
if re.match(A__ , A__ ):
# replace sequential layers with list
lowercase__ = re.match(A__ , A__ ).group(1 )
lowercase__ = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(A__ )//3}.linear.''' )
elif re.match(A__ , A__ ):
lowercase__ = int(re.match(A__ , A__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
lowercase__ = 1 if projecton_layer == 0 else 2
lowercase__ = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
lowercase__ = value
lowercase__ = mixed_qkv.size(0 ) // 3
lowercase__ = mixed_qkv[:qkv_dim]
lowercase__ = mixed_qkv[qkv_dim : qkv_dim * 2]
lowercase__ = mixed_qkv[qkv_dim * 2 :]
lowercase__ = query_layer
lowercase__ = key_layer
lowercase__ = value_layer
else:
lowercase__ = value
return model_state_dict
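# --- Added check (hypothetical key): the sequential-layer rewrite above maps
# index 3 to layer 3 // 3 == 1, so e.g.
# 'text_projection.sequential.3.weight' -> 'text_projection.layers.1.linear.weight'.
assert int(re.match(r'.*sequential.(\d+).*', 'text_projection.sequential.3.weight').group(1)) // 3 == 1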
def _lowerCAmelCase ( A__ , A__ , A__ , A__=False ):
lowercase__, lowercase__ = init_clap(A__ , enable_fusion=A__ )
clap_model.eval()
lowercase__ = clap_model.state_dict()
lowercase__ = rename_state_dict(A__ )
lowercase__ = ClapConfig()
lowercase__ = enable_fusion
lowercase__ = ClapModel(A__ )
# ignore the spectrogram embedding layer
model.load_state_dict(A__ , strict=A__ )
model.save_pretrained(A__ )
transformers_config.save_pretrained(A__ )
if __name__ == "__main__":
a__ : str = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
a__ : Optional[Any] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = []
lowercase__ = set()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf')
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return len(self.elements) == 0
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(lowerCAmelCase)
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(lowerCAmelCase)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
self.set.remove(lowerCAmelCase)
return (priority, item)
def _lowerCAmelCase ( A__ , A__ ):
# euclidean distance
lowercase__ = np.array(A__ )
lowercase__ = np.array(A__ )
return np.linalg.norm(a - b )
def _lowerCAmelCase ( A__ , A__ ):
# integer division by time variable
return consistent_heuristic(A__ , A__ ) // t
def _lowerCAmelCase ( A__ , A__ ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ )
return ans
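# --- Added sanity note (kept as a comment: the style transform emits every
# function above as `_lowerCAmelCase`, so earlier definitions are shadowed).
# The three heuristics registered below are one consistent (Euclidean) and two
# inadmissible ones (time-scaled Euclidean and Manhattan); on ((0, 0), (3, 4))
# they evaluate to 5.0, 5.0 // t and 7 respectively.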
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = np.chararray((n, n) )
for i in range(A__ ):
for j in range(A__ ):
lowercase__ = '*'
for i in range(A__ ):
for j in range(A__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '#'
lowercase__ = '-'
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__), (lowercase__)) = x
# print(x)
lowercase__ = '-'
lowercase__ = back_pointer[x]
lowercase__ = '-'
for i in range(A__ ):
for j in range(A__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowercase__ = back_pointer[goal]
while x != start:
print(A__ , end=' ' )
lowercase__ = back_pointer[x]
print(A__ )
sys.exit()
def _lowerCAmelCase ( A__ ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
for itera in range(A__ ):
open_list[itera].remove_element(A__ )
# print("s", s)
# print("j", j)
((lowercase__), (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A__ )
lowercase__ = -1
lowercase__ = float('inf' )
if valid(A__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) )
if neighbours not in close_list_inad:
for var in range(1 , A__ ):
if key(A__ , A__ , A__ , A__ ) <= Wa * key(
A__ , 0 , A__ , A__ ):
open_list[j].put(
A__ , key(A__ , A__ , A__ , A__ ) )
def _lowerCAmelCase ( ):
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a__ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a__ : Any = make_common_ground()
a__ : Union[str, Any] = blocks_blk
# hyperparameters
a__ : List[Any] = 1
a__ : List[str] = 1
a__ : Optional[int] = 20
a__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a__ : Tuple = (0, 0)
a__ : str = (n - 1, n - 1)
a__ : Optional[Any] = 1
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {start: 0, goal: float('inf' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(A__ ):
open_list.append(PriorityQueue() )
open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , A__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__, lowercase__ = open_list[i].top_show()
visited.add(A__ )
expand_state(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_inad.append(A__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(A__ )
expand_state(
A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_anchor.append(A__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( A__ , A__ , A__ ):
# Initialise PyTorch model
lowercase__ = RemBertConfig.from_json_file(A__ )
print('Building PyTorch model from configuration: {}'.format(str(A__ ) ) )
lowercase__ = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ , A__ , A__ )
# Save pytorch-model
print('Save PyTorch model to {}'.format(A__ ) )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a__ : Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
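# --- Added usage sketch (script filename assumed; argument names are from the
# parser above, paths are placeholders):
#     python convert_rembert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/rembert/ckpt \
#         --rembert_config_file /path/to/rembert_config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin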
import math
import sys
def _lowerCAmelCase ( A__ ):
lowercase__ = ''
try:
with open(A__ , 'rb' ) as binary_file:
lowercase__ = binary_file.read()
for dat in data:
lowercase__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
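# --- Added walk-through (illustrative reading of the decoder above): starting
# from the seed lexicon {'0': '0', '1': '1'}, each prefix that matches an entry
# emits that entry's code; the matched slot is then respawned with a '0' suffix
# and a fresh '1' entry is added, and the codebook is rebuilt whenever the
# running index crosses a power of two, mirroring how the companion compressor
# grew its dictionary so the bit-stream decodes exactly.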
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a__ : int = get_logger(__name__)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__=0 ):
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase__ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase__ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
lowercase__ = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(A__ , A__ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase__ = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowercase__ = os.path.join(A__ , A__ )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(A__ , A__ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase__ = os.path.join(A__ , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(F'''Saving model to {ckpt_dir}''' )
lowercase__ = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
lowercase__ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
lowercase__ = os.path.join(A__ , A__ )
logger.info(F'''Loading model from {input_model_file}''' )
lowercase__ = torch.load(A__ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase__ = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowercase__ = os.path.join(A__ , A__ )
logger.info(F'''Loading model from {input_model_file}''' )
lowercase__ = torch.load(A__ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase__ = (
os.path.join(A__ , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
lowercase__ = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
lowercase__ = state_dict['model']
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(A__ )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__=0 ):
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase__ = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase__ = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
lowercase__ = os.path.join(A__ , A__ )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(A__ , A__ )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
lowercase__ = os.path.join(A__ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase__ = None
# below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase__ = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
lowercase__ = os.path.join(A__ , A__ )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
lowercase__ = torch.load(A__ )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
lowercase__ = (
os.path.join(A__ , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
lowercase__ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(A__ ) , )
lowercase__ = optim_state['optimizer']
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
lowercase__ = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
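# --- Added summary note (names de-obfuscated for clarity): the four helpers
# above all dispatch on `fsdp_plugin.state_dict_type`:
#   FULL_STATE_DICT    -> one consolidated .bin file (rank 0 writes/reads)
#   LOCAL_STATE_DICT   -> one .bin per rank ('..._rank{process_index}.bin')
#   SHARDED_STATE_DICT -> a torch.distributed.checkpoint directory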
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
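# --- Added usage sketch (assumes this class corresponds to `EsmTokenizer`
# from transformers):
#     tok = EsmTokenizer.from_pretrained('facebook/esm2_t6_8M_UR50D')
#     tok('MKTAYIAK')['input_ids']  # <cls> + one id per residue + <eos>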
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a__ : str = logging.get_logger(__name__)
a__ : int = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = "marian"
A : int = ["past_key_values"]
A : Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[str] , lowerCAmelCase : Tuple=5_81_01 , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=10_24 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : List[str]=40_96 , lowerCAmelCase : Any=16 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=40_96 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Tuple=10_24 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : int=0.0 , lowerCAmelCase : int=0.02 , lowerCAmelCase : List[str]=5_81_00 , lowerCAmelCase : int=False , lowerCAmelCase : int=5_81_00 , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Any:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = decoder_vocab_size or vocab_size
lowercase__ = max_position_embeddings
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def UpperCAmelCase ( self : Dict) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
lowercase__, lowercase__ = self.num_layers
for i in range(lowerCAmelCase):
lowercase__ = {0: 'batch', 2: 'past_sequence + sequence'}
lowercase__ = {0: 'batch', 2: 'past_sequence + sequence'}
else:
lowercase__ = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
])
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def UpperCAmelCase ( self : List[str]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase__ = super().outputs
else:
lowercase__ = super(lowerCAmelCase , self).outputs
if self.use_past:
lowercase__, lowercase__ = self.num_layers
for i in range(lowerCAmelCase):
lowercase__ = {0: 'batch', 2: 'past_sequence + sequence'}
lowercase__ = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
lowercase__ = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# Generate decoder inputs
lowercase__ = seq_length if not self.use_past else 1
lowercase__ = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowercase__ = dict(**lowerCAmelCase , **lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
lowercase__, lowercase__ = common_inputs['input_ids'].shape
lowercase__ = common_inputs['decoder_input_ids'].shape[1]
lowercase__, lowercase__ = self.num_attention_heads
lowercase__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase__ = decoder_seq_length + 3
lowercase__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase__ = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase)] , dim=1)
lowercase__ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase__, lowercase__ = self.num_layers
lowercase__ = min(lowerCAmelCase , lowerCAmelCase)
lowercase__ = max(lowerCAmelCase , lowerCAmelCase) - min_num_layers
lowercase__ = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase),
torch.zeros(lowerCAmelCase),
torch.zeros(lowerCAmelCase),
torch.zeros(lowerCAmelCase),
))
# TODO: test this.
lowercase__ = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase), torch.zeros(lowerCAmelCase)))
return common_inputs
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
lowercase__ = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
lowercase__, lowercase__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowercase__ = seqlen + 2
lowercase__, lowercase__ = self.num_layers
lowercase__, lowercase__ = self.num_attention_heads
lowercase__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase__ = common_inputs['attention_mask'].dtype
lowercase__ = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase)] , dim=1)
lowercase__ = [
(torch.zeros(lowerCAmelCase), torch.zeros(lowerCAmelCase)) for _ in range(lowerCAmelCase)
]
return common_inputs
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
lowercase__ = compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If the axis is dynamic (-1), we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase__ = tokenizer.num_special_tokens_to_add(lowerCAmelCase)
lowercase__ = compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase)
# Generate dummy inputs according to compute batch and sequence
lowercase__ = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
lowercase__ = dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase))
return common_inputs
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase)
else:
lowercase__ = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase)
return common_inputs
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]) -> Any:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase__ = super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
else:
lowercase__ = super(lowerCAmelCase , self)._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Tuple) -> float:
"""simple docstring"""
return 1E-4
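# --- Added usage sketch (class and call names are assumptions; the classes
# above are emitted under obfuscated names). The dummy-input generators are
# what the ONNX exporter consumes, conceptually:
#     onnx_config = MarianOnnxConfig(config, task='seq2seq-lm')
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)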
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
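# ---------------------------------------------------------------------------
# A minimal from-scratch sketch of the GLEU idea described in _DESCRIPTION
# above: min(n-gram recall, n-gram precision) over all 1- to 4-grams. The
# helper names here are my own; the metric class itself delegates to
# nltk.translate.gleu_score.corpus_gleu, as the compute method shows.
# ---------------------------------------------------------------------------
from collections import Counter


def sentence_gleu_sketch(reference, hypothesis, min_len=1, max_len=4):
    """Single-reference GLEU for one sentence pair (illustrative only)."""

    def ngram_counts(tokens):
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    ref, hyp = ngram_counts(reference), ngram_counts(hypothesis)
    if not ref or not hyp:
        return 0.0
    matches = sum((ref & hyp).values())  # clipped matching n-gram counts
    recall = matches / sum(ref.values())  # matches / n-grams in the target
    precision = matches / sum(hyp.values())  # matches / n-grams in the output
    return min(recall, precision)


# Identical sequences match every n-gram, so the score is 1.0.
assert sentence_gleu_sketch('the cat sat'.split(), 'the cat sat'.split()) == 1.0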
| 642
| 1
|
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowercase__ = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
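# ---------------------------------------------------------------------------
# Self-contained usage sketch of the cycle check above (the dump renames both
# helpers to `_lowerCAmelCase`, so this restates them with readable names; the
# example graphs are mine). A back edge into the current recursion stack means
# the directed graph contains a cycle.
# ---------------------------------------------------------------------------
def has_cycle(graph):
    visited, rec_stk = set(), set()

    def dfs(vertex):
        visited.add(vertex)
        rec_stk.add(vertex)
        for node in graph[vertex]:
            if node not in visited:
                if dfs(node):
                    return True
            elif node in rec_stk:
                return True  # back edge found
        rec_stk.remove(vertex)  # leave the recursion stack before returning
        return False

    return any(node not in visited and dfs(node) for node in graph)


assert has_cycle({0: [1], 1: [2], 2: [0]}) is True  # 0 -> 1 -> 2 -> 0
assert has_cycle({0: [1], 1: [2], 2: []}) is False  # a DAG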
| 642
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
| 1
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
a__ : Optional[Any] = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_28, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
a__ : str = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
a__ : List[str] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55)
a__ : Optional[Any] = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
a__ : List[Any] = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
a__ : int = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
a__ : Optional[Any] = tf.keras.preprocessing.image.img_to_array(test_image)
a__ : Any = np.expand_dims(test_image, axis=0)
a__ : List[str] = classifier.predict(test_image)
# training_set.class_indices
# The sigmoid output is a probability in (0, 1), so threshold it rather than
# testing for exact equality with 0 or 1.
if result[0][0] <= 0.5:
    a__ : Tuple = "Normal"
else:
    a__ : Any = "Abnormality detected"
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
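# ---------------------------------------------------------------------------
# Worked example of the amortization formula above, restated with readable
# names (the dump obfuscates the function to `_lowerCAmelCase`); the loan
# figures below are hypothetical.
# ---------------------------------------------------------------------------
def equated_monthly_installment(principal, rate_per_annum, years_to_repay):
    rate_per_month = rate_per_annum / 12
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


# 25_000 borrowed at 12% p.a. over 3 years comes to roughly 830.36 per month.
print(round(equated_monthly_installment(25_000, 0.12, 3), 2))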
| 642
| 1
|
import logging
from transformers.configuration_utils import PretrainedConfig
a__ : Any = logging.getLogger(__name__)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Tuple = "masked_bert"
def __init__( self : Dict , lowerCAmelCase : Dict=3_05_22 , lowerCAmelCase : Dict=7_68 , lowerCAmelCase : Any=12 , lowerCAmelCase : int=12 , lowerCAmelCase : List[Any]=30_72 , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : int=0.1 , lowerCAmelCase : Dict=5_12 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : str=1E-1_2 , lowerCAmelCase : Tuple=0 , lowerCAmelCase : List[str]="topK" , lowerCAmelCase : Dict="constant" , lowerCAmelCase : Dict=0.0 , **lowerCAmelCase : int , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase)
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = pruning_method
lowercase__ = mask_init
lowercase__ = mask_scale
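# ---------------------------------------------------------------------------
# A hedged usage sketch: the config class above is renamed by the dump, so
# this stand-in subclass shows only the three pruning-specific fields that
# extend a vanilla BERT config. The names below are mine, not the library's.
# ---------------------------------------------------------------------------
from transformers.configuration_utils import PretrainedConfig


class TinyMaskedConfig(PretrainedConfig):
    model_type = 'masked_bert'

    def __init__(self, pruning_method='topK', mask_init='constant', mask_scale=0.0, **kwargs):
        super().__init__(**kwargs)
        self.pruning_method = pruning_method  # how the mask scores are binarized
        self.mask_init = mask_init  # initialization scheme for the mask scores
        self.mask_scale = mask_scale  # scale used by that initialization


print(TinyMaskedConfig(pruning_method='magnitude').pruning_method)  # magnitude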
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : List[str] = CycleDiffusionPipeline
A : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
A : int = PipelineTesterMixin.required_optional_params - {"latents"}
A : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
A : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
torch.manual_seed(0)
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , num_train_timesteps=10_00 , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0)
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowercase__ = CLIPTextModel(lowerCAmelCase)
lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int=0) -> Any:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase)).to(lowerCAmelCase)
lowercase__ = image / 2 + 0.5
if str(lowerCAmelCase).startswith('mps'):
lowercase__ = torch.manual_seed(lowerCAmelCase)
else:
lowercase__ = torch.Generator(device=lowerCAmelCase).manual_seed(lowerCAmelCase)
lowercase__ = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
lowercase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = CycleDiffusionPipeline(**lowerCAmelCase)
lowercase__ = pipe.to(lowerCAmelCase)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs(lowerCAmelCase)
lowercase__ = pipe(**lowerCAmelCase)
lowercase__ = output.images
lowercase__ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCAmelCase , 'half'):
lowercase__ = module.half()
lowercase__ = CycleDiffusionPipeline(**lowerCAmelCase)
lowercase__ = pipe.to(lowerCAmelCase)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs(lowerCAmelCase)
lowercase__ = pipe(**lowerCAmelCase)
lowercase__ = output.images
lowercase__ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@skip_mps
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline')
def UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCAmelCase ( self : int) -> List[str]:
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png')
lowercase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy')
lowercase__ = init_image.resize((5_12, 5_12))
lowercase__ = 'CompVis/stable-diffusion-v1-4'
lowercase__ = DDIMScheduler.from_pretrained(lowerCAmelCase , subfolder='scheduler')
lowercase__ = CycleDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , torch_dtype=torch.floataa , revision='fp16')
pipe.to(lowerCAmelCase)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
pipe.enable_attention_slicing()
lowercase__ = 'A black colored car'
lowercase__ = 'A blue colored car'
lowercase__ = torch.manual_seed(0)
lowercase__ = pipe(
prompt=lowerCAmelCase , source_prompt=lowerCAmelCase , image=lowerCAmelCase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCAmelCase , output_type='np' , )
lowercase__ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image).max() < 5E-1
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png')
lowercase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy')
lowercase__ = init_image.resize((5_12, 5_12))
lowercase__ = 'CompVis/stable-diffusion-v1-4'
lowercase__ = DDIMScheduler.from_pretrained(lowerCAmelCase , subfolder='scheduler')
lowercase__ = CycleDiffusionPipeline.from_pretrained(lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase)
pipe.to(lowerCAmelCase)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
pipe.enable_attention_slicing()
lowercase__ = 'A black colored car'
lowercase__ = 'A blue colored car'
lowercase__ = torch.manual_seed(0)
lowercase__ = pipe(
prompt=lowerCAmelCase , source_prompt=lowerCAmelCase , image=lowerCAmelCase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCAmelCase , output_type='np' , )
lowercase__ = output.images
assert np.abs(image - expected_image).max() < 2E-2
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
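# ---------------------------------------------------------------------------
# A tiny sketch of the `feed_forward_proj` parsing performed in the config's
# __init__ above: the string splits into an optional 'gated' prefix plus the
# activation name, with 'gated-gelu' special-cased to 'gelu_new'.
# ---------------------------------------------------------------------------
def parse_feed_forward_proj(feed_forward_proj):
    act_info = feed_forward_proj.split('-')
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == 'gated'
    if len(act_info) > 2 or (len(act_info) > 1 and not is_gated_act):
        raise ValueError(f'invalid feed_forward_proj: {feed_forward_proj!r}')
    if feed_forward_proj == 'gated-gelu':
        dense_act_fn = 'gelu_new'
    return dense_act_fn, is_gated_act


print(parse_feed_forward_proj('gated-gelu'))  # ('gelu_new', True)
print(parse_feed_forward_proj('relu'))        # ('relu', False)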
| 642
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a__ : Optional[Any] = 16
a__ : Optional[int] = 32
def _lowerCAmelCase ( A__ , A__ = 16 ):
lowercase__ = AutoTokenizer.from_pretrained('bert-base-cased' )
lowercase__ = load_dataset('glue' , 'mrpc' )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
A__ , padding='longest' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='pt' , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
lowercase__ = DataLoader(
tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a__ : Any = mocked_dataloaders # noqa: F811
def _lowerCAmelCase ( A__ , A__ ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , A__ ) == "1":
lowercase__ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowercase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowercase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config['lr']
lowercase__ = int(config['num_epochs'] )
lowercase__ = int(config['seed'] )
lowercase__ = int(config['batch_size'] )
set_seed(A__ )
lowercase__, lowercase__ = get_dataloaders(A__ , A__ )
lowercase__ = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowercase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase__ = batch_size // MAX_GPU_BATCH_SIZE
lowercase__ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=100 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowercase__ = os.path.split(A__ )[-1].split('.' )[0]
accelerator.init_trackers(A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowercase__ = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ = model(**A__ )
lowercase__ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A__ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__, lowercase__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=A__ , references=A__ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , A__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(A__ ),
'epoch': epoch,
} , step=A__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=A__ , default=A__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=A__ , default='logs' , help='Location where to store experiment tracking logs and relevant project information' , )
lowercase__ = parser.parse_args()
lowercase__ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
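# ---------------------------------------------------------------------------
# Standalone sketch of the gradient-accumulation pattern used in the training
# loop above, stripped of Accelerate: the toy model, data, and step count are
# mine. Scaling the loss by the accumulation factor makes the summed
# gradients match one large-batch update.
# ---------------------------------------------------------------------------
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
gradient_accumulation_steps = 4

for step in range(16):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    (loss / gradient_accumulation_steps).backward()
    if step % gradient_accumulation_steps == 0:  # same cadence as the loop above
        optimizer.step()
        optimizer.zero_grad()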
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a__ : int = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def _lowerCAmelCase ( A__ ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A__ )
def _lowerCAmelCase ( A__ ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowercase__ = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(A__ , id=A__ )
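# ---------------------------------------------------------------------------
# A hedged invocation sketch: assuming `pytest_addoption_shared` registers the
# `--make-reports` option that the summary hook above reads, a run could be
# driven programmatically like this. The test path and report name are
# placeholders, not from this repository's docs.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import pytest

    pytest.main(['tests', '-s', '--make-reports=example_run'])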
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
for i in range(0 , 80):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0XC_A_6_2_C_1_D_6
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F,
a,
self.rotate(lowerCAmelCase , 30),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0XF_F_F_F_F_F_F_F,
self.h[1] + b & 0XF_F_F_F_F_F_F_F,
self.h[2] + c & 0XF_F_F_F_F_F_F_F,
self.h[3] + d & 0XF_F_F_F_F_F_F_F,
self.h[4] + e & 0XF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
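# ---------------------------------------------------------------------------
# Quick sanity check of the 32-bit left-rotate used by `expand_block` and the
# round function above (standalone restatement; the name is mine).
# ---------------------------------------------------------------------------
def rotl32(n, b):
    return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF


assert rotl32(0x12345678, 8) == 0x34567812  # top byte wraps to the bottom
assert rotl32(0x80000000, 1) == 0x00000001  # the sign bit wraps around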
| 642
| 1
|
a__ : str = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
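# ---------------------------------------------------------------------------
# A small usage sketch of the feature types re-exported above; the schema is
# an invented example, not part of this module.
# ---------------------------------------------------------------------------
from datasets import ClassLabel, Features, Image, Sequence, Value

features = Features(
    {
        'tokens': Sequence(Value('string')),  # variable-length token list
        'label': ClassLabel(names=['neg', 'pos']),  # integer-backed labels
        'picture': Image(),  # decoded lazily as a PIL image
    }
)
print(features)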
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a__ : int = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["input_ids", "attention_mask"]
A : Any = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
    def mask_token( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
    def mask_token( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
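# Hedged sketch of the special-token layout produced by the two methods
# above for BART: `<s> A </s>` for a single sequence, `<s> A </s></s> B </s>`
# for a pair, with all-zero token type ids in both cases. Token ids below
# are hypothetical placeholders, not real vocabulary entries.
def _build_inputs(bos, eos, a, b=None):
    out = [bos] + a + [eos]
    return out if b is None else out + [eos] + b + [eos]

assert _build_inputs(0, 2, [7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]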
| 642
| 1
|
import pytest
import datasets
# Import fixture modules as plugins
a__ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def _lowerCAmelCase ( A__ , A__ ):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _lowerCAmelCase ( A__ ):
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=A__ )
def _lowerCAmelCase ( A__ , A__ ):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
lowercase__ = tmp_path_factory.getbasetemp() / 'cache'
lowercase__ = test_hf_cache_home / 'datasets'
lowercase__ = test_hf_cache_home / 'metrics'
lowercase__ = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(A__ ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(A__ ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(A__ ) )
lowercase__ = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(A__ ) )
lowercase__ = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(A__ ) )
@pytest.fixture(autouse=A__ , scope='session' )
def _lowerCAmelCase ( ):
datasets.disable_progress_bar()
@pytest.fixture(autouse=A__ )
def _lowerCAmelCase ( A__ ):
# don't take tests into account when counting downloads
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , A__ )
@pytest.fixture
def _lowerCAmelCase ( A__ ):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , A__ )
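# Hedged sketch of the fixture pattern used above: an autouse fixture that
# repoints a config value at a temporary directory for every test and
# restores it afterwards. `_Cfg`/`CFG` are hypothetical stand-ins for
# `datasets.config`, not part of the library.
import pytest

class _Cfg:
    CACHE_DIR = "/default/cache"

CFG = _Cfg()

@pytest.fixture(autouse=True)
def _redirect_cache(monkeypatch, tmp_path):
    # monkeypatch undoes this automatically when the test finishes.
    monkeypatch.setattr(CFG, "CACHE_DIR", str(tmp_path / "cache"))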
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
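# Hedged standalone sketch of the variance the `_get_variance` assertions
# above exercise (standard DDIM, eq. (16) of arXiv:2010.02502). The linear
# `alphas_cumprod` below is a toy schedule, not the scheduler's real one.
import torch

alphas_cumprod = torch.linspace(0.999, 0.01, 1000)

def ddim_variance(t: int, prev_t: int) -> torch.Tensor:
    alpha_t = alphas_cumprod[t]
    alpha_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    beta = 1 - alpha_t / alpha_prev
    return (1 - alpha_prev) / (1 - alpha_t) * beta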
| 642
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = ["image_processor", "tokenizer"]
A : Optional[int] = "ViTImageProcessor"
A : Optional[Any] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Optional[Any] , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str]) -> Tuple:
"""simple docstring"""
lowercase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase , )
lowercase__ = kwargs.pop('feature_extractor')
lowercase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(lowerCAmelCase , lowerCAmelCase)
def __call__( self : Dict , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Any=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[Any]=None , **lowerCAmelCase : Tuple) -> int:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.')
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.')
if text is not None:
lowercase__ = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
if visual_prompt is not None:
lowercase__ = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
if images is not None:
lowercase__ = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
if visual_prompt is not None and images is not None:
lowercase__ = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowercase__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowercase__ = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase) , tensor_type=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : str) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase)
@property
def UpperCAmelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCAmelCase , )
return self.image_processor
| 642
|
import cva
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : float , lowerCAmelCase : int) -> Dict:
"""simple docstring"""
if k in (0.04, 0.06):
lowercase__ = k
lowercase__ = window_size
else:
raise ValueError('invalid k value')
def __str__( self : Tuple) -> str:
"""simple docstring"""
return str(self.k)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowercase__ = cva.imread(lowerCAmelCase , 0)
lowercase__, lowercase__ = img.shape
lowercase__ = []
lowercase__ = img.copy()
lowercase__ = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB)
lowercase__, lowercase__ = np.gradient(lowerCAmelCase)
lowercase__ = dx**2
lowercase__ = dy**2
lowercase__ = dx * dy
lowercase__ = 0.04
lowercase__ = self.window_size // 2
for y in range(lowerCAmelCase , h - offset):
for x in range(lowerCAmelCase , w - offset):
lowercase__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = (wxx * wyy) - (wxy**2)
lowercase__ = wxx + wyy
lowercase__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0) , 0)
color_img.itemset((y, x, 1) , 0)
color_img.itemset((y, x, 2) , 2_55)
return color_img, corner_list
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
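# Hedged standalone sketch of the corner response computed in the nested
# loop above: R = det(M) - k * trace(M)**2 for the 2x2 structure tensor
# M = [[wxx, wxy], [wxy, wyy]] (toy window sums, not from a real image).
import numpy as np

wxx, wyy, wxy, k = 4.0, 3.0, 1.0, 0.04
m = np.array([[wxx, wxy], [wxy, wyy]])
r = np.linalg.det(m) - k * np.trace(m) ** 2
assert abs(r - ((wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2)) < 1e-9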
| 642
| 1
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = torch.nn.Linear(10 , 10)
lowercase__ = torch.optim.SGD(model.parameters() , 0.1)
lowercase__ = Accelerator()
lowercase__ = accelerator.prepare(lowerCAmelCase)
try:
pickle.loads(pickle.dumps(lowerCAmelCase))
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''')
AcceleratorState._reset_state()
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 642
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
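# Hedged sketch of what the alignment helper above resolves: backbone
# `out_features` names are reconciled with their positional `out_indices`
# against the stage names built from the config's depths.
stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
out_features = ["stage2", "stage4"]
out_indices = [stage_names.index(name) for name in out_features]
assert out_indices == [2, 4]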
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
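# Hedged sketch of the lazy-import pattern implemented by `_LazyModule`
# above: attribute access triggers the real import only on first use. This
# uses a PEP 562 module-level `__getattr__` as a simplified stand-in for
# the library's machinery.
import importlib
import types

def _lazy_module(name, attr_to_module):
    mod = types.ModuleType(name)
    def __getattr__(attr):
        submodule = importlib.import_module(attr_to_module[attr])
        return getattr(submodule, attr)
    mod.__getattr__ = __getattr__
    return mod

# e.g. _lazy_module("pkg", {"OrderedDict": "collections"}).OrderedDict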
| 642
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase : StableDiffusionSafetyChecker , lowerCAmelCase : CLIPImageProcessor , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , )
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Optional[Union[str, int]] = "auto") -> List[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase)
@torch.no_grad()
def __call__( self : Dict , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 5_12 , lowerCAmelCase : int = 5_12 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[torch.FloatTensor] = None , **lowerCAmelCase : str , ) -> List[Any]:
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = 1
elif isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = len(lowerCAmelCase)
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase)}''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase , lowerCAmelCase) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(lowerCAmelCase)}.''')
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''')
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase__ = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__, lowercase__, lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1 , lowerCAmelCase , 1)
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = ['']
elif type(lowerCAmelCase) is not type(lowerCAmelCase):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase)} !='''
f''' {type(lowerCAmelCase)}.''')
elif isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCAmelCase):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase)}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.')
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='pt' , )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(lowerCAmelCase , lowerCAmelCase , 1)
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(
lowerCAmelCase , generator=lowerCAmelCase , device='cpu' , dtype=lowerCAmelCase).to(self.device)
lowercase__ = torch.randn(lowerCAmelCase , generator=lowerCAmelCase , device='cpu' , dtype=lowerCAmelCase).to(
self.device)
else:
lowercase__ = torch.randn(
lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase)
lowercase__ = torch.randn(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase)
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
lowercase__ = latents_reference.to(self.device)
lowercase__ = latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowercase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase__ = 0 if dx < 0 else dx
lowercase__ = 0 if dy < 0 else dy
lowercase__ = max(-dx , 0)
lowercase__ = max(-dy , 0)
# import pdb
# pdb.set_trace()
lowercase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCAmelCase)):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase)
# predict the noise residual
lowercase__ = self.unet(lowerCAmelCase , lowerCAmelCase , encoder_hidden_states=lowerCAmelCase).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__, lowercase__ = noise_pred.chunk(2)
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = 1 / 0.1_82_15 * latents
lowercase__ = self.vae.decode(lowerCAmelCase).sample
lowercase__ = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if self.safety_checker is not None:
lowercase__ = self.feature_extractor(self.numpy_to_pil(lowerCAmelCase) , return_tensors='pt').to(
self.device)
lowercase__, lowercase__ = self.safety_checker(
images=lowerCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
else:
lowercase__ = None
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCAmelCase)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCAmelCase , nsfw_content_detected=lowerCAmelCase)
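# Hedged standalone sketch of the classifier-free guidance step performed
# inside the denoising loop above: split the doubled batch into its
# unconditional and text-conditioned halves, then recombine with the scale.
import torch

noise_pred = torch.randn(2, 4, 8, 8)  # toy doubled batch: [uncond, text]
guidance_scale = 7.5
noise_uncond, noise_text = noise_pred.chunk(2)
guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)
assert guided.shape == (1, 4, 8, 8)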
| 642
|
# Imports
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
if red is not None:
lowercase__ = red
if green is not None:
lowercase__ = green
if blue is not None:
lowercase__ = blue
if red_edge is not None:
lowercase__ = red_edge
if nir is not None:
lowercase__ = nir
return True
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
lowercase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
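# Hedged standalone sketch of the NDVI formula implemented above, on toy
# 2x2 reflectance bands (the class's dispatch table is not used here).
import numpy as np

nir = np.array([[0.8, 0.7], [0.6, 0.5]])
red = np.array([[0.2, 0.3], [0.1, 0.4]])
ndvi = (nir - red) / (nir + red)
assert abs(float(ndvi[0, 0]) - 0.6) < 1e-9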
| 642
| 1
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _lowerCAmelCase ( A__ = "laptop" ):
lowercase__ = F'''https://www.amazon.in/laptop/s?k={product}'''
lowercase__ = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
lowercase__ = BeautifulSoup(requests.get(A__ , headers=A__ ).text )
# Initialize a Pandas dataframe with the column titles
lowercase__ = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
lowercase__ = item.ha.text
lowercase__ = 'https://www.amazon.in/' + item.ha.a['href']
lowercase__ = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
lowercase__ = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
lowercase__ = 'Not available'
try:
lowercase__ = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
lowercase__ = ''
try:
lowercase__ = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
lowercase__ = float('nan' )
except AttributeError:
pass
lowercase__ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowercase__ = ' '
lowercase__ = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
a__ : Any = "headphones"
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
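# Hedged sketch of the discount percentage computed above, on toy prices
# (the scraper derives the MRP and current price from the parsed rupee
# strings before applying this formula).
mrp, price = 1000.0, 750.0
discount = (mrp - price) / mrp * 100
assert discount == 25.0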
| 642
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
| 642
| 1
|
from __future__ import annotations
import math
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ):
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , A__ , A__ , A__ ) , minimax(depth + 1 , node_index * 2 + 1 , A__ , A__ , A__ ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , A__ , A__ , A__ ) , minimax(depth + 1 , node_index * 2 + 1 , A__ , A__ , A__ ) , )
)
def _lowerCAmelCase ( ):
lowercase__ = [90, 23, 6, 33, 21, 65, 123, 34_423]
lowercase__ = math.log(len(A__ ) , 2 )
print(F'''Optimal value : {minimax(0 , 0 , A__ , A__ , A__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
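# Hedged standalone sketch of the recursion above on a depth-2 tree with
# four leaves: the maximizer picks max(min(3, 5), min(2, 9)) == 3.
def _mm(depth, index, is_max, scores, height):
    if depth == height:
        return scores[index]
    pick = max if is_max else min
    return pick(
        _mm(depth + 1, index * 2, not is_max, scores, height),
        _mm(depth + 1, index * 2 + 1, not is_max, scores, height),
    )

assert _mm(0, 0, True, [3, 5, 2, 9], 2) == 3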
| 642
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]
        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]
        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]
        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_2[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))
        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]
        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]
        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))
        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))
        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)
        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
        # truncate to smallest with np
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_2 = input_2[input_name]
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(input_1.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_2))
        # truncate to middle
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_2 = input_2[input_name]
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_3 = input_3[input_name]
        self.assertTrue(input_1.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_are_equal(input_1, input_2))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_3))
        self.assertTrue(len(input_3[-1]) == len(speech_inputs[-1]))
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_2 = input_2[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_1[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)
        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
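# Illustrative sketch (added; not part of the original mixin): the
# `pad_to_multiple_of` assertions above rely on this rounding rule.
def _round_up_to_multiple(length: int, multiple: int) -> int:
    """Round `length` up to the nearest multiple of `multiple`."""
    if length % multiple == 0:
        return length
    return (length // multiple + 1) * multiple

assert _round_up_to_multiple(23, 10) == 30
assert _round_up_to_multiple(30, 10) == 30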
| 642
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
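# Illustrative sketch (added): a minimal stand-in showing the idea behind the
# `_LazyModule` pattern above -- submodules are imported only when one of their
# attributes is first accessed. The real `_LazyModule` has more machinery.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)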
| 642
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
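# Self-contained reference implementation (added for cross-checking; uses trial
# division instead of the `maths` helpers imported above).
def mobius_reference(n: int) -> int:
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # squared prime factor => mu(n) == 0
                return 0
            result = -result
        p += 1
    if n > 1:  # leftover prime factor
        result = -result
    return result

assert [mobius_reference(k) for k in (1, 2, 3, 4, 6, 30)] == [1, -1, -1, 0, 1, -1]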
| 642
| 1
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)

def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()

def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)

@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]

def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)

def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination

def is_port_in_use(port=None):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
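# Quick sanity check (added) for `merge_dicts` above: nested dicts merge
# recursively, and scalar values from `source` overwrite `destination`.
assert merge_dicts({"a": {"x": 1}, "b": 2}, {"a": {"y": 0}}) == {"a": {"y": 0, "x": 1}, "b": 2}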
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
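# Illustration (added) of how `stage_names` above is derived for the default
# depths [2, 2, 6, 2]: 'stem' plus one entry per stage.
_depths = [2, 2, 6, 2]
assert ["stem"] + [f"stage{idx}" for idx in range(1, len(_depths) + 1)] == [
    "stem", "stage1", "stage2", "stage3", "stage4",
]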
| 642
| 1
|
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes

def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
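# Worked example (added): with ceiling=100 the answer is 41 = 2+3+5+7+11+13,
# the longest run of consecutive primes that sums to a prime below 100.
assert solution(100) == 41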
| 642
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
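# Usage sketch (added) for the `get_pairs` helper above on a toy word: it
# returns the set of adjacent symbol pairs that BPE merge ranking inspects.
assert get_pairs("hello") == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}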
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
# Imports
import numpy as np
class IndexCalculation:
    """Calculates several vegetation indices from per-band matrices."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (2 * self.green + self.red + self.blue)

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (self.nir + (self.green + self.blue))

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (self.nir + (self.green + self.red))

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * ((self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1))

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
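# Worked numeric example (added): NDVI = (NIR - RED) / (NIR + RED); with
# hypothetical reflectances NIR=0.8 and RED=0.2 the index is 0.6.
_nir, _red = np.array([0.8]), np.array([0.2])
assert np.isclose((_nir - _red) / (_nir + _red), 0.6)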
| 642
|
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
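# Usage sketch (added) for the PriorityQueue above: `put` inserts or, for an
# item already present, re-prioritises it; `get` pops the smallest priority.
_pq = PriorityQueue()
_pq.put((0, 0), 5)
_pq.put((1, 1), 3)
_pq.put((0, 0), 1)  # update path: (0, 0) is re-prioritised
assert _pq.get() == (1, (0, 0))
assert _pq.get() == (3, (1, 1))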
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)

def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t

def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])

def key(start: TPos, i: int, goal: TPos, g_function):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"
    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"
    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"
    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")
            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground() -> list[TPos]:
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))
    for x in range(15, 20):
        some_list.append((x, 17))
    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))
    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
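# Quick check (added) of the Euclidean heuristic used above: the distance
# from (0, 0) to (3, 4) is 5.
assert np.isclose(consistent_heuristic((3, 4), (0, 0)), 5.0)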
| 642
| 1
|
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)

def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)

def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
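# Added sketch: the recursion above performs exactly 2**height - 1 disk moves.
def count_moves(height: int) -> int:
    return 0 if height < 1 else 2 * count_moves(height - 1) + 1

assert [count_moves(h) for h in (1, 2, 3)] == [1, 3, 7]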
| 642
|
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()

def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result

def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()

def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits

def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
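# Illustration (added) of the padding rule in `write_file_binary` above: the
# final byte is completed with a single '1' followed by zeros.
_chunk = "10110"
assert _chunk + "1" + "0" * (8 - len(_chunk) - 1) == "10110100"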
| 642
| 1
|
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]

def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
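# Property check (added) on a minimal 2-node graph: the single edge must be
# saturated, so it is the whole cut.
assert mincut([[0, 5], [0, 0]], source=0, sink=1) == [(0, 1)]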
| 642
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text: str, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix=None):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
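# Illustration (added) of `get_special_tokens_mask` above for a single
# sequence of three hypothetical token ids: CLS and EOS positions are marked.
_token_ids = [5, 6, 7]
assert [1] + [0] * len(_token_ids) + [1] == [1, 0, 0, 0, 1]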
| 642
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()

def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
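# Hypothetical invocation sketch (added), showing how main() patches sys.argv;
# script names and arguments below are placeholders:
#   python xla_spawn.py --num_cores 8 train.py --lr 1e-4
# inside the spawned process, sys.argv becomes:
#   ["train.py", "--lr", "1e-4", "--tpu_num_cores", "8"]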
| 642
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
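# A hedged sketch of what corpus-level GLEU computes (the metric above
# delegates to nltk's gleu_score.corpus_gleu; the helper names below are ours).
# Per sentence, take the reference maximising matching n-grams over
# max(hypothesis n-grams, reference n-grams), then sum numerators and
# denominators over the corpus rather than averaging sentence-level scores.
from collections import Counter

def _ngram_counts(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts

def corpus_gleu_sketch(list_of_references, hypotheses, min_len=1, max_len=4):
    corpus_match, corpus_all = 0, 0
    for references, hypothesis in zip(list_of_references, hypotheses):
        hyp_counts = _ngram_counts(hypothesis, min_len, max_len)
        tpfp = sum(hyp_counts.values())  # true positives + false positives
        best_match, best_all = 0, 0
        for reference in references:
            ref_counts = _ngram_counts(reference, min_len, max_len)
            n_match = sum((hyp_counts & ref_counts).values())
            n_all = max(tpfp, sum(ref_counts.values())) or 1
            if best_all == 0 or n_match * best_all > best_match * n_all:
                best_match, best_all = n_match, n_all
        corpus_match += best_match
        corpus_all += best_all
    return corpus_match / corpus_all if corpus_all else 0.0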
| 642
| 1
|
def _lowerCAmelCase ( A__ ):
lowercase__ = [0 for i in range(len(A__ ) )]
# initialize interval's left pointer and right pointer
lowercase__, lowercase__ = 0, 0
for i in range(1 , len(A__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
lowercase__ = min(right_pointer - i + 1 , z_result[i - left_pointer] )
lowercase__ = min_edge
while go_next(A__ , A__ , A__ ):
z_result[i] += 1
        # if the new index's result extends the interval further right,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
lowercase__, lowercase__ = i, i + z_result[i] - 1
return z_result
def _lowerCAmelCase ( A__ , A__ , A__ ):
return i + z_result[i] < len(A__ ) and s[z_result[i]] == s[i + z_result[i]]
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
lowercase__ = z_function(pattern + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # then this index is the starting position of a substring
        # equal to the pattern string
if val >= len(A__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
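# A readable reference implementation of the Z-function above (names here are
# illustrative; the file uses a style-obfuscated naming scheme). z[i] is the
# length of the longest common prefix of s and s[i:]. Note that pattern search
# over pattern + text with no sentinel can over-count self-overlapping
# patterns (e.g. "aa" in "aa"); inserting a separator absent from both strings
# avoids that.
def z_function_reference(s: str) -> list[int]:
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:  # reuse prefix information from the current interval
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z

assert z_function_reference("abacaba") == [0, 0, 1, 0, 3, 0, 1]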
| 642
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
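# For reference, a standalone configuration equivalent to the tiny model the
# tester above constructs (a sketch; values mirror the tester's defaults):
# from transformers import FunnelConfig
# tiny_config = FunnelConfig(
#     vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1,
#     d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new",
# )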
| 642
| 1
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _lowerCAmelCase ( A__ ):
lowercase__ = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _lowerCAmelCase ( A__ ):
lowercase__, lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(A__ , A__ , bias=A__ )
lowercase__ = emb.weight.data
return lin_layer
def _lowerCAmelCase ( A__ ):
lowercase__ = torch.load(A__ , map_location='cpu' )
lowercase__ = Namespace(**checkpoint['cfg']['model'] )
lowercase__ = checkpoint['model']
remove_ignore_keys_(A__ )
lowercase__ = state_dict['decoder.embed_tokens.weight'].shape[0]
lowercase__ = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
lowercase__ = XGLMConfig(
vocab_size=A__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
lowercase__ = XGLMForCausalLM(A__ )
lowercase__ = model.load_state_dict(A__ , strict=A__ )
print(A__ )
lowercase__ = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
a__ : Union[str, Any] = parser.parse_args()
a__ : Union[str, Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
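# Typical invocation (script name and paths are placeholders):
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-hf
# This loads the fairseq checkpoint, drops bookkeeping keys, renames
# 'decoder.*' weights to 'model.*', ties the LM head to the input embeddings,
# and saves a Hugging Face checkpoint to the output folder.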
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get the number of payments, as payments are monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
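# A worked example of the amortisation formula above, with assumed figures:
# borrowing 100_000 at 10% p.a. over 2 years gives monthly rate r = 0.10 / 12
# and n = 24 payments, so EMI = P * r * (1 + r)**n / ((1 + r)**n - 1).
P, r, n = 100_000, 0.10 / 12, 2 * 12
emi = P * r * (1 + r) ** n / ((1 + r) ** n - 1)
assert abs(emi - 4614.49) < 0.01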
| 642
| 1
|
import math
import sys
import cva
import numpy as np
def _lowerCAmelCase ( A__ , A__ ):
    # Apply the Gaussian function to each element of the matrix.
lowercase__ = math.sqrt(A__ )
lowercase__ = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def _lowerCAmelCase ( A__ , A__ ):
# Creates a gaussian kernel of given dimension.
lowercase__ = np.zeros((kernel_size, kernel_size) )
for i in range(0 , A__ ):
for j in range(0 , A__ ):
lowercase__ = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(A__ , A__ )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , ):
lowercase__ = np.zeros(img.shape )
lowercase__ = get_gauss_kernel(A__ , A__ )
lowercase__, lowercase__ = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
lowercase__ = get_slice(A__ , A__ , A__ , A__ )
lowercase__ = img_s - img_s[kernel_size // 2, kernel_size // 2]
lowercase__ = vec_gaussian(A__ , A__ )
lowercase__ = np.multiply(A__ , A__ )
lowercase__ = np.multiply(A__ , A__ )
lowercase__ = np.sum(A__ ) / np.sum(A__ )
lowercase__ = val
return imga
def _lowerCAmelCase ( A__ ):
lowercase__ = args[1] if args[1:] else '../image_data/lena.jpg'
lowercase__ = float(args[2] ) if args[2:] else 1.0
lowercase__ = float(args[3] ) if args[3:] else 1.0
if args[4:]:
lowercase__ = int(args[4] )
lowercase__ = kernel_size + abs(kernel_size % 2 - 1 )
else:
lowercase__ = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a__ , a__ , a__ , a__ : List[Any] = parse_args(sys.argv)
a__ : Any = cva.imread(filename, 0)
cva.imshow("input image", img)
a__ : Optional[int] = img / 2_55
a__ : List[Any] = out.astype("float32")
a__ : Tuple = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a__ : Optional[Any] = out * 2_55
a__ : Dict = np.uinta(out)
cva.imshow("output image", out)
cva.waitKey(0)
cva.destroyAllWindows()
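# A sketch of the per-pixel computation performed in the loop above: the
# output is a normalised weighted mean whose weights combine the precomputed
# spatial Gaussian kernel with an intensity Gaussian of the difference to the
# centre pixel (names here are illustrative, not the obfuscated ones):
def bilateral_pixel(window: np.ndarray, spatial_kernel: np.ndarray, intensity_sigma: float) -> float:
    centre = window[window.shape[0] // 2, window.shape[1] // 2]
    intensity_kernel = np.exp(-0.5 * ((window - centre) / intensity_sigma) ** 2)
    weights = spatial_kernel * intensity_kernel
    return float((weights * window).sum() / weights.sum())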
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
from __future__ import annotations
from collections.abc import Generator
def _lowerCAmelCase ( ):
lowercase__ = {}
lowercase__ = 2
while True:
lowercase__ = factor_map.pop(A__ , A__ )
if factor:
lowercase__ = factor + prime
while x in factor_map:
x += factor
lowercase__ = factor
else:
lowercase__ = prime
yield prime
prime += 1
def _lowerCAmelCase ( A__ = 1E1_0 ):
lowercase__ = sieve()
lowercase__ = 1
while True:
lowercase__ = next(A__ )
if (2 * prime * n) > limit:
return n
        # Skip the next prime, as the remainder would be 2.
next(A__ )
n += 2
if __name__ == "__main__":
print(solution())
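# A readable sketch of the incremental sieve above: for each upcoming
# composite it stores one known prime factor, deferring work until that
# number is reached, so primes come out in order with no fixed upper bound.
from collections.abc import Generator

def incremental_sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:  # composite: slide its prime factor to the next free multiple
            multiple = candidate + factor
            while multiple in factor_map:
                multiple += factor
            factor_map[multiple] = factor
        else:  # prime: its square is the first composite it must mark
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1

gen = incremental_sieve()
assert [next(gen) for _ in range(8)] == [2, 3, 5, 7, 11, 13, 17, 19]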
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
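# A standalone sketch of the `feed_forward_proj` parsing performed in
# __init__ above: "gated-gelu" selects the gated variant and maps the act fn
# to "gelu_new", while a bare name like "relu" selects the ungated variant.
# The helper name below is ours:
def parse_feed_forward_proj(feed_forward_proj: str) -> tuple[str, bool]:
    act_info = feed_forward_proj.split("-")
    dense_act_fn, is_gated_act = act_info[-1], act_info[0] == "gated"
    if (len(act_info) > 1 and not is_gated_act) or len(act_info) > 2:
        raise ValueError(f"{feed_forward_proj} is not a valid activation function")
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"
    return dense_act_fn, is_gated_act

assert parse_feed_forward_proj("gated-gelu") == ("gelu_new", True)
assert parse_feed_forward_proj("relu") == ("relu", False)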
| 642
| 1
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = 1.5
lowercase__ = int(factor * num_class_images )
lowercase__ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=A__ , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=A__ )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase__ = client.query(text=A__ )
if len(A__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
lowercase__ = int(factor * num_images )
lowercase__ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=A__ , aesthetic_weight=0.1 , )
lowercase__ = 0
lowercase__ = 0
lowercase__ = tqdm(desc='downloading real regularization images' , total=A__ )
with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open(
F'''{class_data_dir}/images.txt''' , 'w' ) as fa:
while total < num_class_images:
lowercase__ = class_images[count]
count += 1
try:
lowercase__ = requests.get(images['url'] )
if img.status_code == 200:
lowercase__ = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser('' , add_help=A__ )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=A__ , type=A__ )
parser.add_argument('--class_data_dir' , help='path to save images' , required=A__ , type=A__ )
parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=A__ )
return parser.parse_args()
if __name__ == "__main__":
a__ : int = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
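# Typical invocation (script name and paths are placeholders):
#   python retrieve.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./class_data/dog --num_class_images 200
# This queries the LAION-400M index via clip-retrieval and writes numbered
# JPEGs plus caption/url manifests under class_data_dir.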
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
        # fmt: off
        lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
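# A standalone check mirroring the slow test above (network access required;
# the expected ids are taken from that test, not recomputed here):
# from transformers import XGLMTokenizer
# tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# assert tok.encode("Hello World!") == [2, 31227, 4447, 35]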
| 642
| 1
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a__ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> None:
"""simple docstring"""
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
for i in range(0 , 80):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0XC_A_6_2_C_1_D_6
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F,
a,
self.rotate(lowerCAmelCase , 30),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0XF_F_F_F_F_F_F_F,
self.h[1] + b & 0XF_F_F_F_F_F_F_F,
self.h[2] + c & 0XF_F_F_F_F_F_F_F,
self.h[3] + d & 0XF_F_F_F_F_F_F_F,
self.h[4] + e & 0XF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
    # In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
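# A quick sanity check of the pure-Python SHA-1 above against hashlib on a
# classic test vector (spellings such as `shaa` in the file come from the
# digit-free obfuscation scheme and stand for `sha1`):
import hashlib
assert (
    hashlib.sha1(b"The quick brown fox jumps over the lazy dog").hexdigest()
    == "2fd4e1c67a2d28fced849ee1bb76e7391b93eb12"
)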
| 642
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase )
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
A : ClassVar[Features] = Features({"audio": Audio()} )
A : ClassVar[Features] = Features({"transcription": Value("string" )} )
A : str = "audio"
A : str = "transcription"
def UpperCAmelCase ( self : str , lowerCAmelCase : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(f'''Column {self.audio_column} is not present in features.''')
if not isinstance(features[self.audio_column] , lowerCAmelCase):
raise ValueError(f'''Column {self.audio_column} is not an Audio type.''')
lowercase__ = copy.deepcopy(self)
lowercase__ = self.input_schema.copy()
lowercase__ = features[self.audio_column]
lowercase__ = input_schema
return task_template
@property
def UpperCAmelCase ( self : List[str]) -> Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
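# A hedged usage sketch (class and method names assumed from the upstream
# datasets library, where this template is exposed as AutomaticSpeechRecognition):
# from datasets import Audio, Features, Value
# template = AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")
# features = Features({"audio": Audio(), "text": Value("string")})
# template = template.align_with_features(features)  # adopts the dataset's Audio feature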
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a__ : int = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["input_ids", "attention_mask"]
A : Any = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
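# The special-token layout built by the two methods above, sketched on toy
# ids (0 and 2 are BART's <s>/</s>; single: <s> A </s>, pair: <s> A </s></s> B </s>):
bos, eos = 0, 2
ids_a, ids_b = [10, 11], [20]
single = [bos] + ids_a + [eos]
pair = single + [eos] + ids_b + [eos]
assert pair == [0, 10, 11, 2, 2, 20, 2]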
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : int = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
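# A sketch of the sampling loop these tests exercise (the denoiser call is a
# placeholder; `full_loop` above wires in a dummy model):
# scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear", clip_sample=True)
# scheduler.set_timesteps(10)
# for t in scheduler.timesteps:
#     residual = model(sample, t)  # predicted noise
#     sample = scheduler.step(residual, t, sample, 0.0).prev_sample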
| 642
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : int) -> None:
"""simple docstring"""
lowercase__ = value
lowercase__ = None
lowercase__ = None
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase : Node) -> None:
"""simple docstring"""
lowercase__ = tree
    def depth_first_search( self : List[Any] , node : Node | None) -> int:
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )
def __iter__( self : Optional[Any]) -> Iterator[int]:
"""simple docstring"""
yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
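# A minimal usage sketch for the classes above (the iterator class keeps its
# obfuscated name in this file): build a three-node tree and sum it through the
# depth-first iterator.
#
#     root = Node(10)
#     root.left = Node(5)
#     root.right = Node(-3)
#     assert next(iter(UpperCAmelCase__(root))) == 12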
import cv2 as cva  # OpenCV, aliased to match the identifier used throughout this file
import numpy as np
class HarrisCorner:
'''simple docstring'''
    def __init__( self : Union[str, Any] , k : float , window_size : int) -> None:
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')
def __str__( self : Tuple) -> str:
"""simple docstring"""
return str(self.k)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
        img = cva.imread(lowerCAmelCase , 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # sensitivity factor validated in __init__
        offset = self.window_size // 2
        for y in range(offset , h - offset):
            for x in range(offset , w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # corner-response threshold; can be tuned
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0) , 0)
                    color_img.itemset((y, x, 1) , 0)
                    color_img.itemset((y, x, 2) , 255)
return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
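# The response computed in detect() is the standard Harris corner measure
#     R = det(M) - k * trace(M)**2
# where M is the 2x2 structure tensor summed over the window. A tiny numeric
# sketch with made-up window sums (not from any real image):
#
#     wxx, wyy, wxy, k = 4.0, 4.0, 0.5, 0.04
#     r = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2  # 15.75 - 2.56 = 13.19
#
# Large positive R marks a corner, large negative R an edge, and |R| near zero a
# flat region.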
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
    def __init__( self : List[Any] , initial_learning_rate : float , decay_schedule_fn : Callable , warmup_steps : int , power : float = 1.0 , name : str = None , ) -> None:
        """simple docstring"""
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self : Optional[int] , step) -> Any:
        """simple docstring"""
        with tf.name_scope(self.name or 'WarmUp') as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power)
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps) , name=name , )
    def get_config( self : Union[str, Any]) -> dict:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer( init_lr , num_train_steps , num_warmup_steps , min_lr_ratio = 0.0 , adam_beta1 = 0.9 , adam_beta2 = 0.999 , adam_epsilon = 1E-8 , adam_clipnorm = None , adam_global_clipnorm = None , weight_decay_rate = 0.0 , power = 1.0 , include_in_weight_decay = None , ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class AdamWeightDecay( Adam ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_01 , lowerCAmelCase : float = 0.9 , lowerCAmelCase : float = 0.9_99 , lowerCAmelCase : float = 1E-7 , lowerCAmelCase : bool = False , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "AdamWeightDecay" , **lowerCAmelCase : Dict , ) -> List[str]:
"""simple docstring"""
super().__init__(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase)
lowercase__ = weight_decay_rate
lowercase__ = include_in_weight_decay
lowercase__ = exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
lowercase__ = {'WarmUp': WarmUp}
return super(lowerCAmelCase , cls).from_config(lowerCAmelCase , custom_objects=lowerCAmelCase)
def UpperCAmelCase ( self : str , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Any) -> Any:
"""simple docstring"""
super(lowerCAmelCase , self)._prepare_local(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate')
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self._do_use_weight_decay(var.name)
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , **lowerCAmelCase : Optional[Any]) -> Dict:
"""simple docstring"""
lowercase__, lowercase__ = list(zip(*lowerCAmelCase))
return super(lowerCAmelCase , self).apply_gradients(zip(lowerCAmelCase , lowerCAmelCase) , name=lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple) -> Tuple:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowercase__ = apply_state or {}
lowercase__ = apply_state.get((var_device, var_dtype))
if coefficients is None:
lowercase__ = self._fallback_apply_state(lowerCAmelCase , lowerCAmelCase)
lowercase__ = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any]=None) -> str:
"""simple docstring"""
lowercase__, lowercase__ = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase)
lowercase__ = self._decay_weights_op(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
with tf.control_dependencies([decay]):
return super(lowerCAmelCase , self)._resource_apply_dense(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]=None) -> List[Any]:
"""simple docstring"""
lowercase__, lowercase__ = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase)
lowercase__ = self._decay_weights_op(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
with tf.control_dependencies([decay]):
return super(lowerCAmelCase , self)._resource_apply_sparse(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate})
return config
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Optional[Any]) -> Optional[int]:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCAmelCase , lowerCAmelCase) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCAmelCase , lowerCAmelCase) is not None:
return False
return True
class GradientAccumulator:
'''simple docstring'''
def __init__( self : List[Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = []
lowercase__ = None
@property
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients')
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : str , lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
if not self._gradients:
lowercase__ = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCAmelCase) , trainable=lowerCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
])
if len(lowerCAmelCase) != len(self._gradients):
raise ValueError(f'''Expected {len(self._gradients)} gradients, but got {len(lowerCAmelCase)}''')
for accum_gradient, gradient in zip(self._gradients , lowerCAmelCase):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCAmelCase)
self._accum_steps.assign_add(1)
def UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0)
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCAmelCase))
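# A minimal usage sketch for the factory above (restored here as `create_optimizer`);
# the step counts and rates are illustrative only:
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,            # peak learning rate reached after warmup
#         num_train_steps=10_000,  # total steps covered by the polynomial decay
#         num_warmup_steps=500,    # linear warmup (power=1.0) from 0 up to init_lr
#         weight_decay_rate=0.01,  # > 0.0 selects the AdamWeightDecay variant
#     )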
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self : Optional[int] , vocab_size=10000 , encoder_layers=12 , encoder_ffn_dim=2048 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=6000 , max_target_positions=1024 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=1024 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
                f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
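# A minimal usage sketch for the config above; every value not shown keeps the
# defaults from the signature:
#
#     config = Speech2TextConfig(encoder_layers=6, decoder_layers=6)
#     assert config.num_hidden_layers == 6
#     assert len(config.conv_kernel_sizes) == config.num_conv_layers  # (5, 5) vs 2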
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a__ : Dict = "<<<<<<< This should probably be modified because it mentions: "
a__ : Optional[Any] = "=======\n>>>>>>>\n"
a__ : int = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
a__ : Any = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def _lowerCAmelCase ( A__ ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand( BaseDatasetsCLICommand ):
'''simple docstring'''
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : ArgumentParser) -> Any:
"""simple docstring"""
        train_parser = lowerCAmelCase.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.')
train_parser.set_defaults(func=lowerCAmelCase)
    def __init__( self : List[str] , tfds_path : str , datasets_directory : str , *args : Any) -> None:
        """simple docstring"""
        self._logger = get_logger('datasets-cli/converting')
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
if os.path.isdir(self._tfds_path):
lowercase__ = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
lowercase__ = os.path.dirname(self._tfds_path)
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.')
lowercase__ = os.path.abspath(self._datasets_directory)
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''')
lowercase__ = []
lowercase__ = []
lowercase__ = {}
if os.path.isdir(self._tfds_path):
lowercase__ = os.listdir(lowerCAmelCase)
else:
lowercase__ = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''')
lowercase__ = os.path.join(lowerCAmelCase , lowerCAmelCase)
lowercase__ = os.path.join(lowerCAmelCase , lowerCAmelCase)
if not os.path.isfile(lowerCAmelCase) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file')
continue
with open(lowerCAmelCase , encoding='utf-8') as f:
lowercase__ = f.readlines()
lowercase__ = []
lowercase__ = False
lowercase__ = False
lowercase__ = []
for line in lines:
lowercase__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__ = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
lowercase__ = ''
continue
elif "from absl import logging" in out_line:
lowercase__ = 'from datasets import logging\n'
elif "getLogger" in out_line:
lowercase__ = out_line.replace('getLogger' , 'get_logger')
elif any(expression in out_line for expression in TO_HIGHLIGHT):
lowercase__ = True
lowercase__ = list(filter(lambda lowerCAmelCase: e in out_line , lowerCAmelCase))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase) + '\n')
out_lines.append(lowerCAmelCase)
out_lines.append(lowerCAmelCase)
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__ = re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase__ = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(','))
lowercase__ = 'from . import ' + match.group(1)
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''')
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__ = True
out_lines.append(lowerCAmelCase)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__ = f_name.replace('.py' , '')
lowercase__ = os.path.join(lowerCAmelCase , lowerCAmelCase)
lowercase__ = os.path.join(lowerCAmelCase , lowerCAmelCase)
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase)
self._logger.info(f'''Adding directory {output_dir}''')
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase)
if needs_manual_update:
with_manual_update.append(lowerCAmelCase)
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.writelines(lowerCAmelCase)
self._logger.info(f'''Converted in {output_file}''')
for utils_file in utils_files:
try:
lowercase__ = os.path.basename(lowerCAmelCase)
lowercase__ = imports_to_builder_map[f_name.replace('.py' , '')]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''')
shutil.copy(lowerCAmelCase , lowerCAmelCase)
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''')
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''')
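# A small sketch of how the TO_CONVERT table rewrites a single line (this mirrors
# the re.sub loop in the run method above; the input line is made up):
#
#     line = "features=tfds.features.Text(),"
#     for pattern, replacement in TO_CONVERT:
#         line = re.sub(pattern, replacement, line)
#     assert line == "features=datasets.Value('string'),"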
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    '''simple docstring'''
    def __init__( self : Optional[Any] , lowerCAmelCase : bytes) -> None:
        """simple docstring"""
        self.data = lowerCAmelCase
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
    @staticmethod
    def rotate( n : int , b : int) -> int:
        """simple docstring"""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
    def padding( self : Dict) -> bytes:
        """simple docstring"""
        padding = b'\x80' + b'\x00' * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
        return padded_data
    def split_blocks( self : int) -> list:
        """simple docstring"""
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
        ]
    def expand_block( self : Tuple , lowerCAmelCase : bytes) -> list:
        """simple docstring"""
        w = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
        for i in range(16 , 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
        return w
    def final_hash( self : str) -> str:
        """simple docstring"""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0 , 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a , 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b , 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b'Test String'
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description='Process some strings or files' )
    parser.add_argument(
        '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHAaHash(hash_input).final_hash() )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
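# A small sketch of the 32-bit left-rotate used in SHAaHash.rotate; the mask keeps
# the result within 32 bits:
#
#     n = 0x80000000
#     assert ((n << 1) | (n >> 31)) & 0xFFFFFFFF == 0x00000001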
# Imports
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
if red is not None:
lowercase__ = red
if green is not None:
lowercase__ = green
if blue is not None:
lowercase__ = blue
if red_edge is not None:
lowercase__ = red_edge
if nir is not None:
lowercase__ = nir
return True
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
lowercase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
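# A minimal NDVI sketch with made-up reflectance arrays, mirroring the
# (nir - red) / (nir + red) formula used by the NDVI method above; the values are
# illustrative only:
#
#     nir = np.array([0.8, 0.7])
#     red = np.array([0.1, 0.2])
#     ndvi = (nir - red) / (nir + red)  # -> array([0.7777..., 0.5555...])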
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Tuple = "▁"
a__ : Union[str, Any] = {"vocab_file": "spiece.model"}
a__ : List[str] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
a__ : Dict = {
"google/pegasus-xsum": 5_12,
}
a__ : Dict = logging.get_logger(__name__)
class PegasusTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]="<pad>" , lowerCAmelCase : Optional[Any]="</s>" , lowerCAmelCase : Tuple="<unk>" , lowerCAmelCase : Optional[Any]="<mask_2>" , lowerCAmelCase : Optional[Any]="<mask_1>" , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=103 , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[str] , ) -> None:
"""simple docstring"""
lowercase__ = offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase , lowerCAmelCase):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCAmelCase)}, but is'''
f''' {type(lowerCAmelCase)}''')
lowercase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCAmelCase) , self.offset - 1)
]
if len(set(lowerCAmelCase)) != len(lowerCAmelCase):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''')
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset)]
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = lowerCAmelCase
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowerCAmelCase)
        # add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
})
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1)})
        self.decoder = {v: k for k, v in self.encoder.items()}
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return len(self.sp_model) + self.offset
def UpperCAmelCase ( self : Dict) -> Dict[str, int]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(lowerCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
    def __getstate__( self : List[str]) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self : Optional[Any] , lowerCAmelCase : int) -> None:
        """simple docstring"""
        self.__dict__ = lowerCAmelCase
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : str) -> int:
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowercase__ = self.sp_model.piece_to_id(lowerCAmelCase)
return sp_id + self.offset
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : int) -> str:
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowercase__ = self.sp_model.IdToPiece(index - self.offset)
return token
    def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[int]) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''
        for token in lowerCAmelCase:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int=False) -> List[Any]:
"""simple docstring"""
return 1
def UpperCAmelCase ( self : str , lowerCAmelCase : Dict) -> List[Any]:
"""simple docstring"""
lowercase__ = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCAmelCase)
elif token_ids_a is None:
return self._special_token_mask(lowerCAmelCase) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any]=None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase , 'wb') as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase)
return (out_vocab_file,)
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , ToolTesterMixin ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ):
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # total count of single characters, used to normalise counts into probabilities.
    all_sum = sum(single_char_strings.values() )
    # entropy of one-character strings
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F'''{round(-1 * my_fir_sum ):.1f}''' )
    # entropy of two-character strings
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each two-character sequence calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text( text ):
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
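# A minimal usage sketch for the functions above (with their original names
# restored); the sample text is illustrative:
#
#     single, double = analyze_text("hello world")
#     # `single` counts characters, `double` counts overlapping two-character windows
#     calculate_prob("hello world")  # prints H1, H2 and H2 - H1, each rounded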
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( FeatureExtractionSavingTestMixin ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
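# Quick sanity values for the function above (these follow directly from the
# definition of the Möbius function; the `mobius` name is restored here, the
# original dump left the function anonymous):
#   mobius(6)  == 1    # 6 = 2 * 3 is square-free with an even number of prime factors
#   mobius(30) == -1   # 30 = 2 * 3 * 5 is square-free with an odd number of prime factors
#   mobius(4)  == 0    # 4 = 2 * 2 is not square-free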
if __name__ == "__main__":
import doctest
doctest.testmod()
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"
    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
        drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
        use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
        max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0,
        is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
        return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine",
        backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4,
        encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300,
        with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1,
        dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1,
        focal_alpha=0.25, disable_custom_kernels=False, **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
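# Worked example for the helper above (illustrative only): for the symbol
# tuple ("h", "e", "l", "l", "o"), get_pairs returns the adjacent pairs
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- exactly the candidate
# set that the BPE merge loop in the tokenizer below ranks via `bpe_ranks`.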
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row of the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2]
        # to this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First we check that the column is not already used in the current board
        # (possible_board), because that would be a vertical collision. Then we
        # apply the two diagonal formulas:
        #
        # 45º:  y - x = b,  i.e. row - col = b
        # 135º: y + x = b,  i.e. row + col = b
        #
        # and verify that neither result already exists in its collision set
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to
        # the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If all checks pass we call the DFS function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
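# Worked example of the collision checks above (hypothetical placements):
# queens at (row=0, col=1) and (row=1, col=2) lie on the same 45º diagonal
# because 0 - 1 == 1 - 2 == -1, so the second placement is rejected by the
# `row - col in diagonal_right_collisions` test; likewise (0, 3) and (1, 2)
# share a 135º diagonal since 0 + 3 == 1 + 2 == 3.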
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item that is already queued
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
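# Minimal usage sketch for the queue above (hypothetical values):
#   pq = PriorityQueue()
#   pq.put((0, 0), 5.0)
#   pq.put((0, 0), 2.0)   # re-inserting an item updates its priority
#   pq.minkey()           # -> 2.0
#   pq.get()              # -> (2.0, (0, 0)), and (0, 0) leaves the set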
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)
def heuristic_1(P: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(P, goal) // t
def heuristic_2(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])
def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
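# Sketch of the priority computed above (uses the module-level W1 and the
# heuristics dict defined further down): for a node s with g_function[s] == 3,
# key(s, 0, goal, g_function) == 3 + W1 * euclidean_distance(s, goal), i.e.
# the standard weighted-A* priority f(s) = g(s) + W * h(s).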
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
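# Mechanism note (assuming the reconstruction above is faithful to the
# original LZW decompressor): the lexicon starts as {"0": "0", "1": "1"};
# every matched code emits its phrase and registers that phrase extended by
# "0" and "1" as new entries, and whenever the code count reaches a power of
# two every existing key is re-prefixed with "0" so its width stays aligned
# with the growing code length used by the compressor.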
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
a__ : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
a__ : List[str] = "EGZWVONAHDCLFQMSIPJBYUKXTR"
a__ : List[str] = "FOBHMDKEXQNRAULPGSJVTYICZW"
a__ : Optional[int] = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
a__ : Any = "RMDJXFUWGISLHVTCQNKYPBEZOA"
a__ : Tuple = "SGLCPQWZHKXAREONTFBVIYJUDM"
a__ : List[Any] = "HVSICLTYKQUBXDWAJZOMFGPREN"
a__ : int = "RZWQHFMVDBKICJLNTUXAGYPSOE"
a__ : int = "LFKIJODBEGAMQPXVUHYSTCZRWN"
a__ : str = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
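# Hypothetical usage of the helper above: _plugboard("POLAND") yields the
# symmetric mapping {"P": "O", "O": "P", "L": "A", "A": "L", "N": "D",
# "D": "N"}, so every paired letter is swapped both entering and leaving
# the rotor stack.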
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1_sel, rotor2_sel, rotor3_sel = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1_sel[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2_sel[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3_sel[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor3_sel.index(symbol) - rotorpos3]
            symbol = abc[rotor2_sel.index(symbol) - rotorpos2]
            symbol = abc[rotor1_sel.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
a__ : List[Any] = "This is my Python script that emulates the Enigma machine from WWII."
a__ : Any = (1, 1, 1)
a__ : Union[str, Any] = "pictures"
a__ : str = (rotora, rotora, rotora)
a__ : List[str] = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
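# --- Illustrative sketch (not part of the original metric file) ---
# The description above reduces GLEU to min(precision, recall) over aggregate
# 1..4-gram counts, i.e. matches / max(total hypothesis n-grams, total
# reference n-grams). A minimal single-sentence, single-reference version
# under that reading (not the nltk corpus_gleu implementation used above):
from collections import Counter

def _ngram_counts(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts

def sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    hyp = _ngram_counts(hypothesis, min_len, max_len)
    ref = _ngram_counts(reference, min_len, max_len)
    overlap = sum((hyp & ref).values())  # matching n-grams (clipped counts)
    denom = max(sum(hyp.values()), sum(ref.values()))
    return overlap / denom if denom else 0.0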
| 642
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
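# --- Illustrative sketch (not part of the original config file) ---
# Assuming the class above mirrors transformers' Speech2TextConfig, the check
# before super().__init__ enforces exactly one kernel size per convolution
# layer of the feature subsampler:
from transformers import Speech2TextConfig

config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))  # lengths match: OK
try:
    Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5,))  # mismatch
except ValueError as err:
    print(err)  # "Configuration for convolutional module is incorrect. ..."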
| 642
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
| 1
|
def _lowerCAmelCase ( ):
for n in range(1 , 1_000_000 ):
yield n * (n + 1) // 2
def _lowerCAmelCase ( A__ ):
lowercase__ = 1
lowercase__ = 2
while i * i <= n:
lowercase__ = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def _lowerCAmelCase ( ):
return next(i for i in triangle_number_generator() if count_divisors(A__ ) > 500 )
if __name__ == "__main__":
print(solution())
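# --- Illustrative sketch (not part of the original solution file) ---
# The divisor count relies on the multiplicative identity: if
# n = p1^a1 * p2^a2 * ... then d(n) = (a1 + 1)(a2 + 1)...
# A standalone copy of the same algorithm (the three functions above were
# evidently named triangle_number_generator / count_divisors / solution, as
# the calls in the last one suggest), checked on the classic small case:
def count_divisors_demo(n: int) -> int:
    count, i = 1, 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        count *= multiplicity + 1
        i += 1
    return count * 2 if n > 1 else count

assert count_divisors_demo(28) == 6  # 28 = 2^2 * 7 -> (2+1)*(1+1) divisors
triangles = (k * (k + 1) // 2 for k in range(1, 100))
assert next(t for t in triangles if count_divisors_demo(t) > 5) == 28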
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
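# --- Illustrative sketch (not part of the original file) ---
# The return value is the standard amortised-loan (EMI) formula
#     payment = P * r * (1 + r)**n / ((1 + r)**n - 1)
# with monthly rate r = rate_per_annum / 12 and n = years_to_repay * 12
# payments. A standalone copy with a rough sanity check:
def monthly_payment_demo(principal: float, rate_per_annum: float, years: int) -> float:
    r = rate_per_annum / 12
    n = years * 12
    return principal * r * (1 + r) ** n / ((1 + r) ** n - 1)

print(round(monthly_payment_demo(100_000, 0.12, 10), 2))  # ~1434.71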
| 642
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowercase__ = {'unk_token': '<unk>'}
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(lowerCAmelCase) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(lowerCAmelCase))
lowercase__ = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowercase__ = os.path.join(self.tmpdirname , lowerCAmelCase)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int] , **lowerCAmelCase : Any) -> List[str]:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int] , **lowerCAmelCase : Dict) -> Dict:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int] , **lowerCAmelCase : Dict) -> Dict:
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)]
lowercase__ = [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1)) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = OwlViTProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
processor_slow.save_pretrained(self.tmpdirname)
lowercase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase)
lowercase__ = OwlViTProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
processor_fast.save_pretrained(self.tmpdirname)
lowercase__ = OwlViTProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase)
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase)
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
lowercase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
lowercase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
lowercase__ = self.get_image_processor(do_normalize=lowerCAmelCase)
lowercase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = OwlViTProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCAmelCase , return_tensors='np')
lowercase__ = processor(images=lowerCAmelCase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = OwlViTProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = 'lower newer'
lowercase__ = processor(text=lowerCAmelCase , return_tensors='np')
lowercase__ = tokenizer(lowerCAmelCase , return_tensors='np')
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist())
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = OwlViTProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = 'lower newer'
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCAmelCase , images=lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase):
processor()
def UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
lowercase__ = 'google/owlvit-base-patch32'
lowercase__ = OwlViTProcessor.from_pretrained(lowerCAmelCase)
lowercase__ = ['cat', 'nasa badge']
lowercase__ = processor(text=lowerCAmelCase)
lowercase__ = 16
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask'])
self.assertEqual(inputs['input_ids'].shape , (2, seq_length))
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase):
processor()
def UpperCAmelCase ( self : List[Any]) -> Any:
"""simple docstring"""
lowercase__ = 'google/owlvit-base-patch32'
lowercase__ = OwlViTProcessor.from_pretrained(lowerCAmelCase)
lowercase__ = [['cat', 'nasa badge'], ['person']]
lowercase__ = processor(text=lowerCAmelCase)
lowercase__ = 16
lowercase__ = len(lowerCAmelCase)
lowercase__ = max([len(lowerCAmelCase) for texts in input_texts])
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask'])
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase):
processor()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = 'google/owlvit-base-patch32'
lowercase__ = OwlViTProcessor.from_pretrained(lowerCAmelCase)
lowercase__ = ['cat', 'nasa badge']
lowercase__ = processor(text=lowerCAmelCase)
lowercase__ = 16
lowercase__ = inputs['input_ids']
lowercase__ = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask'])
self.assertEqual(inputs['input_ids'].shape , (2, seq_length))
self.assertListEqual(list(input_ids[0]) , predicted_ids[0])
self.assertListEqual(list(input_ids[1]) , predicted_ids[1])
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = OwlViTProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = self.prepare_image_inputs()
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(images=lowerCAmelCase , query_images=lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['query_pixel_values', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase):
processor()
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = OwlViTProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase)
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(lowerCAmelCase)
lowercase__ = tokenizer.batch_decode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
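# --- Illustrative sketch (not part of the original test file) ---
# What the batched-query test above exercises, end to end (this downloads the
# processor files on first use): nested text queries are padded per batch item
# and flattened to (batch_size * max_queries, seq_length).
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
inputs = processor(text=[["cat", "nasa badge"], ["person"]])
print(inputs["input_ids"].shape)  # (4, 16): 2 batch items x max 2 queries each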
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
from copy import deepcopy
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : str , lowerCAmelCase : list[int] | None = None , lowerCAmelCase : int | None = None) -> None:
"""simple docstring"""
if arr is None and size is not None:
lowercase__ = size
lowercase__ = [0] * size
elif arr is not None:
self.init(lowerCAmelCase)
else:
raise ValueError('Either arr or size must be specified')
def UpperCAmelCase ( self : Any , lowerCAmelCase : list[int]) -> None:
"""simple docstring"""
lowercase__ = len(lowerCAmelCase)
lowercase__ = deepcopy(lowerCAmelCase)
for i in range(1 , self.size):
lowercase__ = self.next_(lowerCAmelCase)
if j < self.size:
self.tree[j] += self.tree[i]
def UpperCAmelCase ( self : Tuple) -> list[int]:
"""simple docstring"""
lowercase__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1):
lowercase__ = self.next_(lowerCAmelCase)
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : int) -> int:
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : int) -> int:
"""simple docstring"""
return index - (index & (-index))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int) -> None:
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowercase__ = self.next_(lowerCAmelCase)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : int) -> None:
"""simple docstring"""
self.add(lowerCAmelCase , value - self.get(lowerCAmelCase))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> int:
"""simple docstring"""
if right == 0:
return 0
lowercase__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowercase__ = self.prev(lowerCAmelCase)
return result
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : int) -> int:
"""simple docstring"""
return self.prefix(lowerCAmelCase) - self.prefix(lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> int:
"""simple docstring"""
return self.query(lowerCAmelCase , index + 1)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> int:
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
lowercase__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowercase__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
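# --- Illustrative sketch (not part of the original file) ---
# The class above is a Fenwick (binary indexed) tree: tree[i] stores a partial
# sum, next_/prev add or strip the lowest set bit (index & -index), so
# add/prefix/query run in O(log n); index 0 is kept as a separate bucket.
# Assuming the class keeps its conventional name BinaryIndexedTree:
#
#     bit = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
#     bit.add(2, 10)       # element at index 2 becomes 13
#     bit.prefix(3)        # 16 == 1 + 2 + 13
#     bit.query(1, 4)      # 19 == 2 + 13 + 4
#     bit.get(2)           # 13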
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
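# --- Illustrative sketch (not part of the original config file) ---
# The feed_forward_proj parsing above accepts either "<act>" or "gated-<act>",
# and special-cases "gated-gelu" to the gelu_new kernel. Assuming the class
# mirrors transformers' UMT5Config:
from transformers import UMT5Config

cfg = UMT5Config(feed_forward_proj="gated-gelu")
print(cfg.dense_act_fn, cfg.is_gated_act)  # gelu_new True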
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
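# --- Illustrative sketch (not part of the original __init__ file) ---
# The module above only lists names in its import structure; _LazyModule then
# resolves them on first attribute access so importing the package stays cheap.
# A stripped-down version of the same idea (names here are made up; the real
# helper lives in transformers.utils):
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, name: str):
        # import the owning submodule only when the attribute is first touched
        module = importlib.import_module("." + self._name_to_module[name], self.__name__)
        return getattr(module, name)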
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
        # fmt: off
        lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : str = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Tuple = ["pixel_values"]
def __init__( self : Union[str, Any] , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 2_55 , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : bool = True , **lowerCAmelCase : Any , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = size if size is not None else {'shortest_edge': 2_24}
lowercase__ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase)
lowercase__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowercase__ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase , param_name='crop_size')
lowercase__ = do_resize
lowercase__ = size
lowercase__ = resample
lowercase__ = do_center_crop
lowercase__ = crop_size
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ = do_convert_rgb
def UpperCAmelCase ( self : Dict , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase)
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
lowercase__ = get_resize_output_image_size(lowerCAmelCase , size=size['shortest_edge'] , default_to_square=lowerCAmelCase)
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(lowerCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
return center_crop(lowerCAmelCase , size=(size['height'], size['width']) , data_format=lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : int = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase : Tuple , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(lowerCAmelCase , param_name='size' , default_to_square=lowerCAmelCase)
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ = crop_size if crop_size is not None else self.crop_size
lowercase__ = get_size_dict(lowerCAmelCase , param_name='crop_size' , default_to_square=lowerCAmelCase)
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ = make_list_of_images(lowerCAmelCase)
if not valid_images(lowerCAmelCase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ = [convert_to_rgb(lowerCAmelCase) for image in images]
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(lowerCAmelCase) for image in images]
if do_resize:
lowercase__ = [self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase) for image in images]
if do_center_crop:
lowercase__ = [self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase) for image in images]
lowercase__ = [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase) for image in images]
lowercase__ = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase)
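# --- Illustrative sketch (not part of the original image processor file) ---
# preprocess() above applies, in order: RGB conversion, shortest-edge resize,
# center crop, 1/255 rescale, then mean/std normalisation. The numeric core of
# the last two steps on a toy channels-first array (mean/std rounded from the
# OPENAI_CLIP constants):
import numpy as np

image = np.full((3, 4, 4), 255, dtype=np.uint8)
rescaled = image.astype(np.float32) * (1 / 255)            # do_rescale
mean = np.array([0.481, 0.458, 0.408]).reshape(3, 1, 1)    # image_mean (rounded)
std = np.array([0.269, 0.261, 0.276]).reshape(3, 1, 1)     # image_std (rounded)
normalized = (rescaled - mean) / std                       # do_normalize
print(normalized.shape)  # (3, 4, 4), ready to stack into pixel_values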
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
for i in range(0 , 80):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0XC_A_6_2_C_1_D_6
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F,
a,
self.rotate(lowerCAmelCase , 30),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0XF_F_F_F_F_F_F_F,
self.h[1] + b & 0XF_F_F_F_F_F_F_F,
self.h[2] + c & 0XF_F_F_F_F_F_F_F,
self.h[3] + d & 0XF_F_F_F_F_F_F_F,
self.h[4] + e & 0XF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
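# Hedged sketch (uses the name `SHAaHash` from the self-test above; defined
# but not called). Padding always yields a whole number of 512-bit blocks:
def _sketch_sha1_padding() -> None:
    for msg in (b"", b"abc", b"a" * 55, b"a" * 56, b"a" * 64):
        assert len(SHAaHash(msg).padding()) % 64 == 0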
| 642
| 1
|
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
pass
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
pass
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = [
[],
[],
[],
]
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int) -> None:
"""simple docstring"""
try:
if len(self.queues[priority]) >= 1_00:
raise OverflowError('Maximum queue size is 100')
self.queues[priority].append(lowerCAmelCase)
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2')
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError('All queues are empty')
def __str__( self : List[str]) -> str:
"""simple docstring"""
return "\n".join(f'''Priority {i}: {q}''' for i, q in enumerate(self.queues))
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : int) -> Any:
"""simple docstring"""
lowercase__ = []
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> None:
"""simple docstring"""
if len(self.queue) == 1_00:
raise OverFlowError('Maximum queue size is 100')
self.queue.append(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError('The queue is empty')
else:
lowercase__ = min(self.queue)
self.queue.remove(lowerCAmelCase)
return data
def __str__( self : Any) -> str:
"""simple docstring"""
return str(self.queue)
def _lowerCAmelCase ( ):
lowercase__ = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(A__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(A__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _lowerCAmelCase ( ):
lowercase__ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(A__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(A__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
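# Hedged usage sketch contrasting the two queues above (defined but not
# called, so importing this module stays side-effect free). FixedPriorityQueue
# dequeues strictly by priority level, FIFO within a level, while
# ElementPriorityQueue always returns the smallest element:
def _sketch_priority_queues() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(2, 5)
    fpq.enqueue(0, 10)
    assert fpq.dequeue() == 10  # priority 0 beats priority 2
    epq = ElementPriorityQueue()
    epq.enqueue(5)
    epq.enqueue(1)
    assert epq.dequeue() == 1  # smallest value first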
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a__ : int = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["input_ids", "attention_mask"]
A : Any = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
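# Hedged sketch (assumes the class above under its public name,
# BartTokenizerFast; defined but not called). A single sequence is wrapped as
# <s> ... </s>, and all token type ids are zero, mirroring the two methods
# above:
def _sketch_bart_special_tokens() -> None:
    tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
    ids = tok("Hello world")["input_ids"]
    assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id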
| 642
| 1
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
a__ : int = True
from torch.cuda.amp import autocast
a__ : str = logging.getLogger(__name__)
def _lowerCAmelCase ( A__=None , A__=None ):
return field(default_factory=lambda: default , metadata=A__ )
@dataclass
class UpperCAmelCase__:
'''simple docstring'''
A : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
A : Optional[bool] = field(
default=lowerCamelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
A : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
A : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
A : Optional[float] = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
A : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
A : Optional[float] = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
A : Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class UpperCAmelCase__:
'''simple docstring'''
A : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
A : Optional[str] = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
A : bool = field(
default=lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
A : Optional[int] = field(
default=lowerCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
A : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
A : List[str] = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class UpperCAmelCase__:
'''simple docstring'''
A : WavaVecaProcessor
A : Union[bool, str] = True
A : Optional[int] = None
A : Optional[int] = None
A : Optional[int] = None
A : Optional[int] = None
def __call__( self : Any , lowerCAmelCase : List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
"""simple docstring"""
lowercase__ = [{'input_values': feature['input_values']} for feature in features]
lowercase__ = [{'input_ids': feature['labels']} for feature in features]
lowercase__ = self.processor.pad(
lowerCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
lowercase__ = self.processor.pad(
labels=lowerCAmelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
lowercase__ = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1) , -1_00)
lowercase__ = labels
return batch
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : nn.Module , lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""simple docstring"""
model.train()
lowercase__ = self._prepare_inputs(lowerCAmelCase)
if self.use_amp:
with autocast():
lowercase__ = self.compute_loss(lowerCAmelCase , lowerCAmelCase)
else:
lowercase__ = self.compute_loss(lowerCAmelCase , lowerCAmelCase)
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
lowercase__ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowercase__ = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''')
if self.args.gradient_accumulation_steps > 1:
lowercase__ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase , self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase)
else:
loss.backward()
return loss.detach()
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__, lowercase__, lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__, lowercase__, lowercase__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , A__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
lowercase__ = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
lowercase__ = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
lowercase__ = F'''[{''.join(data_args.chars_to_ignore )}]'''
def remove_special_characters(A__ ):
lowercase__ = re.sub(A__ , '' , batch['sentence'] ).lower() + ' '
return batch
lowercase__ = train_dataset.map(A__ , remove_columns=['sentence'] )
lowercase__ = eval_dataset.map(A__ , remove_columns=['sentence'] )
def extract_all_chars(A__ ):
lowercase__ = ' '.join(batch['text'] )
lowercase__ = list(set(A__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
lowercase__ = train_dataset.map(
A__ , batched=A__ , batch_size=-1 , keep_in_memory=A__ , remove_columns=train_dataset.column_names , )
lowercase__ = train_dataset.map(
A__ , batched=A__ , batch_size=-1 , keep_in_memory=A__ , remove_columns=eval_dataset.column_names , )
lowercase__ = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
lowercase__ = {v: k for k, v in enumerate(A__ )}
    # Remap the space character to the word-delimiter token "|" and append the
    # "[UNK]" and "[PAD]" specials; the throwaway assignments below stand in
    # for vocab_dict["|"], vocab_dict["[UNK]"] and vocab_dict["[PAD]"].
    lowercase__ = vocab_dict[' ']
    del vocab_dict[" "]
    lowercase__ = len(A__ )
    lowercase__ = len(A__ )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(A__ , A__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=A__ , return_attention_mask=A__ )
lowercase__ = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ )
lowercase__ = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
lowercase__ = min(len(A__ ) , data_args.max_train_samples )
lowercase__ = train_dataset.select(range(A__ ) )
if data_args.max_val_samples is not None:
lowercase__ = eval_dataset.select(range(data_args.max_val_samples ) )
lowercase__ = torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(A__ ):
lowercase__, lowercase__ = torchaudio.load(batch['path'] )
lowercase__ = resampler(A__ ).squeeze().numpy()
lowercase__ = 16_000
lowercase__ = batch['text']
return batch
lowercase__ = train_dataset.map(
A__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
lowercase__ = eval_dataset.map(
A__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(A__ ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
lowercase__ = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(A__ )
return batch
lowercase__ = train_dataset.map(
A__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A__ , num_proc=data_args.preprocessing_num_workers , )
lowercase__ = eval_dataset.map(
A__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
lowercase__ = datasets.load_metric('wer' )
def compute_metrics(A__ ):
lowercase__ = pred.predictions
lowercase__ = np.argmax(A__ , axis=-1 )
lowercase__ = processor.tokenizer.pad_token_id
lowercase__ = processor.batch_decode(A__ )
# we do not want to group tokens when computing the metrics
lowercase__ = processor.batch_decode(pred.label_ids , group_tokens=A__ )
lowercase__ = wer_metric.compute(predictions=A__ , references=A__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
lowercase__ = DataCollatorCTCWithPadding(processor=A__ , padding=A__ )
# Initialize our Trainer
lowercase__ = CTCTrainer(
model=A__ , data_collator=A__ , args=A__ , compute_metrics=A__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowercase__ = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
lowercase__ = model_args.model_name_or_path
else:
lowercase__ = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
lowercase__ = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
lowercase__ = train_result.metrics
lowercase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A__ )
)
lowercase__ = min(A__ , len(A__ ) )
trainer.log_metrics('train' , A__ )
trainer.save_metrics('train' , A__ )
trainer.save_state()
# Evaluation
lowercase__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase__ = trainer.evaluate()
lowercase__ = data_args.max_val_samples if data_args.max_val_samples is not None else len(A__ )
lowercase__ = min(A__ , len(A__ ) )
trainer.log_metrics('eval' , A__ )
trainer.save_metrics('eval' , A__ )
return results
if __name__ == "__main__":
main()
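# Hedged sketch of the collator's label masking (assumes a `processor` built
# as in main() above; defined but not called). Padded label positions become
# -100 so the loss ignores them:
def _sketch_collator(processor) -> None:
    collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
    batch = collator(
        [
            {"input_values": [0.0] * 100, "labels": [5, 6]},
            {"input_values": [0.0] * 80, "labels": [7]},
        ]
    )
    assert (batch["labels"][1, 1:] == -100).all()  # padding masked out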
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
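# Hedged note: batch_step_no_noise (exercised in the batched test above)
# denoises several samples at distinct timesteps in a single call by flattening
# the stacked (variant, batch) axes, which is what makes this DDIM variant
# "parallel".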
| 642
| 1
|
from math import pow, sqrt
def _lowerCAmelCase ( *A__ ):
    lowercase__ = len(A__ ) > 0 and all(value > 0.0 for value in A__ )
return result
def _lowerCAmelCase ( A__ , A__ ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A__ , A__ )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def _lowerCAmelCase ( A__ , A__ , A__ ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def _lowerCAmelCase ( A__ , A__ , A__ ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def _lowerCAmelCase ( A__ , A__ , A__ ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def _lowerCAmelCase ( A__ , A__ , A__ ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
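# Worked example of Graham's law as implemented above (hedged: the obfuscated
# signatures collapse the two molar-mass arguments, but the intended relation
# is rate_1 / rate_2 = sqrt(M_2 / M_1)). Defined but not called:
def _sketch_grahams_law() -> None:
    from math import sqrt

    # Hydrogen (2.016 g/mol) effuses about 3.984x faster than oxygen (32.00 g/mol).
    ratio = sqrt(32.00 / 2.016)
    assert abs(ratio - 3.9841) < 1e-3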
| 642
|
import cva
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : float , lowerCAmelCase : int) -> Dict:
"""simple docstring"""
if k in (0.04, 0.06):
lowercase__ = k
lowercase__ = window_size
else:
raise ValueError('invalid k value')
def __str__( self : Tuple) -> str:
"""simple docstring"""
return str(self.k)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowercase__ = cva.imread(lowerCAmelCase , 0)
lowercase__, lowercase__ = img.shape
lowercase__ = []
lowercase__ = img.copy()
lowercase__ = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB)
lowercase__, lowercase__ = np.gradient(lowerCAmelCase)
lowercase__ = dx**2
lowercase__ = dy**2
lowercase__ = dx * dy
        lowercase__ = self.k  # use the configured Harris free parameter instead of a hard-coded 0.04
lowercase__ = self.window_size // 2
for y in range(lowerCAmelCase , h - offset):
for x in range(lowerCAmelCase , w - offset):
lowercase__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = (wxx * wyy) - (wxy**2)
lowercase__ = wxx + wyy
lowercase__ = det - k * (trace**2)
            # Corner-response threshold; 0.5 is arbitrary and can be tuned
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0) , 0)
color_img.itemset((y, x, 1) , 0)
color_img.itemset((y, x, 2) , 2_55)
return color_img, corner_list
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 642
| 1
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
a__ : str = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
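# Hedged sketch (assumes the public name Speech2TextConfig for the class
# above; defined but not called). The attribute_map declared at class level
# aliases the generic config names onto the model-specific ones:
def _sketch_attribute_map() -> None:
    cfg = Speech2TextConfig()
    assert cfg.hidden_size == cfg.d_model
    assert cfg.num_attention_heads == cfg.encoder_attention_heads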
| 642
| 1
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _lowerCAmelCase ( A__ ):
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _lowerCAmelCase ( ):
lowercase__ = ArgumentParser(
'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=A__ )
lowercase__ = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(A__ )
EnvironmentCommand.register_subcommand(A__ )
TestCommand.register_subcommand(A__ )
RunBeamCommand.register_subcommand(A__ )
DummyDataCommand.register_subcommand(A__ )
# Parse args
lowercase__, lowercase__ = parser.parse_known_args()
if not hasattr(A__ , 'func' ):
parser.print_help()
exit(1 )
lowercase__ = parse_unknown_args(A__ )
# Run
lowercase__ = args.func(A__ , **A__ )
service.run()
if __name__ == "__main__":
main()
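# Hedged sketch of parse_unknown_args above (defined but not called):
# alternating "--flag value" tokens become a kwargs dict with dashes stripped.
def _sketch_parse_unknown_args() -> None:
    assert parse_unknown_args(["--name", "squad", "--split", "train"]) == {
        "name": "squad",
        "split": "train",
    }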
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
a__ : Dict = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
a__ : Optional[Any] = get_tests_dir("fixtures/vocab.json")
a__ : List[Any] = get_tests_dir("fixtures")
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
lowercase__ = 0
def UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
lowercase__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h')
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = WavaVecaConfig()
lowercase__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h')
# save in new folder
model_config.save_pretrained(lowerCAmelCase)
processor.save_pretrained(lowerCAmelCase)
lowercase__ = AutoProcessor.from_pretrained(lowerCAmelCase)
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase))
copyfile(lowerCAmelCase , os.path.join(lowerCAmelCase , 'vocab.json'))
lowercase__ = AutoProcessor.from_pretrained(lowerCAmelCase)
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = WavaVecaFeatureExtractor()
lowercase__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h')
lowercase__ = WavaVecaProcessor(lowerCAmelCase , lowerCAmelCase)
# save in new folder
processor.save_pretrained(lowerCAmelCase)
# drop `processor_class` in tokenizer
with open(os.path.join(lowerCAmelCase , lowerCAmelCase) , 'r') as f:
lowercase__ = json.load(lowerCAmelCase)
config_dict.pop('processor_class')
with open(os.path.join(lowerCAmelCase , lowerCAmelCase) , 'w') as f:
f.write(json.dumps(lowerCAmelCase))
lowercase__ = AutoProcessor.from_pretrained(lowerCAmelCase)
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = WavaVecaFeatureExtractor()
lowercase__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h')
lowercase__ = WavaVecaProcessor(lowerCAmelCase , lowerCAmelCase)
# save in new folder
processor.save_pretrained(lowerCAmelCase)
# drop `processor_class` in feature extractor
with open(os.path.join(lowerCAmelCase , lowerCAmelCase) , 'r') as f:
lowercase__ = json.load(lowerCAmelCase)
config_dict.pop('processor_class')
with open(os.path.join(lowerCAmelCase , lowerCAmelCase) , 'w') as f:
f.write(json.dumps(lowerCAmelCase))
lowercase__ = AutoProcessor.from_pretrained(lowerCAmelCase)
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = WavaVecaConfig(processor_class='Wav2Vec2Processor')
model_config.save_pretrained(lowerCAmelCase)
# copy relevant files
copyfile(lowerCAmelCase , os.path.join(lowerCAmelCase , 'vocab.json'))
            # create empty sample processor
with open(os.path.join(lowerCAmelCase , lowerCAmelCase) , 'w') as f:
f.write('{}')
lowercase__ = AutoProcessor.from_pretrained(lowerCAmelCase)
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase):
lowercase__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase):
lowercase__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase)
lowercase__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase)
self.assertTrue(processor.special_attribute_present)
self.assertEqual(processor.__class__.__name__ , 'NewProcessor')
lowercase__ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
lowercase__ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
lowercase__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase , use_fast=lowerCAmelCase)
lowercase__ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present)
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer')
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
try:
AutoConfig.register('custom' , lowerCAmelCase)
AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase)
AutoTokenizer.register(lowerCAmelCase , slow_tokenizer_class=lowerCAmelCase)
AutoProcessor.register(lowerCAmelCase , lowerCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase):
AutoProcessor.register(lowerCAmelCase , lowerCAmelCase)
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase__ = CustomFeatureExtractor.from_pretrained(lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = os.path.join(lowerCAmelCase , 'vocab.txt')
with open(lowerCAmelCase , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
lowercase__ = CustomTokenizer(lowerCAmelCase)
lowercase__ = CustomProcessor(lowerCAmelCase , lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowerCAmelCase)
lowercase__ = AutoProcessor.from_pretrained(lowerCAmelCase)
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = False
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Tuple = False
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Any = "AutoFeatureExtractor"
A : Any = "AutoTokenizer"
A : Union[str, Any] = False
try:
AutoConfig.register('custom' , lowerCAmelCase)
AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase)
AutoTokenizer.register(lowerCAmelCase , slow_tokenizer_class=lowerCAmelCase)
AutoProcessor.register(lowerCAmelCase , lowerCAmelCase)
# If remote code is not set, the default is to use local classes.
lowercase__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor')
self.assertEqual(processor.__class__.__name__ , 'NewProcessor')
self.assertFalse(processor.special_attribute_present)
self.assertFalse(processor.feature_extractor.special_attribute_present)
self.assertFalse(processor.tokenizer.special_attribute_present)
# If remote code is disabled, we load the local ones.
lowercase__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase)
self.assertEqual(processor.__class__.__name__ , 'NewProcessor')
self.assertFalse(processor.special_attribute_present)
self.assertFalse(processor.feature_extractor.special_attribute_present)
self.assertFalse(processor.tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub.
lowercase__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase)
self.assertEqual(processor.__class__.__name__ , 'NewProcessor')
self.assertTrue(processor.special_attribute_present)
self.assertTrue(processor.feature_extractor.special_attribute_present)
self.assertTrue(processor.tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast')
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
lowercase__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext')
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor')
@is_staging_test
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A : List[str] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def UpperCAmelCase ( cls : Union[str, Any]) -> List[str]:
"""simple docstring"""
lowercase__ = TOKEN
HfFolder.save_token(lowerCAmelCase)
@classmethod
def UpperCAmelCase ( cls : Dict) -> List[str]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-processor')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor')
except HTTPError:
pass
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = WavaVecaProcessor.from_pretrained(lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCAmelCase , 'test-processor') , push_to_hub=lowerCAmelCase , use_auth_token=self._token)
lowercase__ = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''')
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase , getattr(new_processor.feature_extractor , lowerCAmelCase))
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = WavaVecaProcessor.from_pretrained(lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCAmelCase , 'test-processor-org') , push_to_hub=lowerCAmelCase , use_auth_token=self._token , organization='valid_org' , )
lowercase__ = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org')
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase , getattr(new_processor.feature_extractor , lowerCAmelCase))
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
def UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase__ = CustomFeatureExtractor.from_pretrained(lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = os.path.join(lowerCAmelCase , 'vocab.txt')
with open(lowerCAmelCase , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
lowercase__ = CustomTokenizer(lowerCAmelCase)
lowercase__ = CustomProcessor(lowerCAmelCase , lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token)
lowercase__ = Repository(lowerCAmelCase , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token)
processor.save_pretrained(lowerCAmelCase)
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowerCAmelCase , 'tokenizer_config.json')) as f:
lowercase__ = json.load(lowerCAmelCase)
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase , 'custom_feature_extraction.py')))
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase , 'custom_tokenization.py')))
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase , 'custom_processing.py')))
repo.push_to_hub()
lowercase__ = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=lowerCAmelCase)
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor')
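# Hedged note: the staging tests above create real repos on the Hub with the
# test token; the classmethod that calls delete_repo acts as a
# tearDownClass-style cleanup so reruns start from a clean namespace.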
| 642
|
# Imports
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
if red is not None:
lowercase__ = red
if green is not None:
lowercase__ = green
if blue is not None:
lowercase__ = blue
if red_edge is not None:
lowercase__ = red_edge
if nir is not None:
lowercase__ = nir
return True
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
lowercase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
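# A minimal, self-contained NDVI sanity check (sketch): it mirrors the NDVI
# formula used by the class above, computed directly with numpy so it runs
# even though the method names in this dump were transformed.
if __name__ == "__main__":
    example_red = np.array([0.2, 0.3])
    example_nir = np.array([0.6, 0.8])
    print((example_nir - example_red) / (example_nir + example_red))  # -> [0.5, 0.4545...]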
| 642
| 1
|
import random
from .binary_exp_mod import bin_exp_mod
def _lowerCAmelCase ( A__ , A__=1_000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowercase__ = n - 1
lowercase__ = 0
while d % 2 == 0:
        d //= 2
exp += 1
# n - 1=d*(2**exp)
lowercase__ = 0
while count < prec:
lowercase__ = random.randint(2 , n - 1 )
        lowercase__ = bin_exp_mod(a , d , n )
if b != 1:
lowercase__ = True
            for _ in range(exp ):
if b == n - 1:
lowercase__ = False
break
lowercase__ = b * b
b %= n
if flag:
return False
count += 1
return True
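# Worked example of the decomposition above: for n = 221, n - 1 = 220 = 55 * 2**2,
# so d = 55 and exp = 2; each random witness a is then checked via
# a**55, a**110, a**220 (mod 221) for the Miller-Rabin conditions.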
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 642
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
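# Direct usage outside the test harness (sketch; downloads a model on first call,
# and `load_tool` here is the transformers tools API exercised by the tests above):
# classifier = load_tool('text-classification')
# classifier.setup()
# classifier("That's quite cool", ['positive', 'negative'])  # -> 'positive'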
| 642
| 1
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
a__ : List[str] = "Hello, World!"
a__ : str = "en_XX"
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = Path('data_bin' )
lowercase__ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(A__ ).parent ) , checkpoint_file=Path(A__ ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(A__ ) , bpe='sentencepiece' , sentencepiece_model=str(Path(A__ ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(A__ )
lowercase__ = xmod.model.encoder.sentence_encoder
lowercase__ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowercase__ = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , A__ )
lowercase__ = XmodForSequenceClassification(A__ ) if classification_head else XmodForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase__ = xmod_sent_encoder.embed_tokens.weight
lowercase__ = xmod_sent_encoder.embed_positions.weight
lowercase__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowercase__ = xmod_sent_encoder.layernorm_embedding.weight
lowercase__ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase__ = model.roberta.encoder.layer[i]
lowercase__ = xmod_sent_encoder.layers[i]
# self attention
lowercase__ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
lowercase__ = xmod_layer.self_attn.q_proj.weight
lowercase__ = xmod_layer.self_attn.q_proj.bias
lowercase__ = xmod_layer.self_attn.k_proj.weight
lowercase__ = xmod_layer.self_attn.k_proj.bias
lowercase__ = xmod_layer.self_attn.v_proj.weight
lowercase__ = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowercase__ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
lowercase__ = xmod_layer.self_attn.out_proj.weight
lowercase__ = xmod_layer.self_attn.out_proj.bias
lowercase__ = xmod_layer.self_attn_layer_norm.weight
lowercase__ = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowercase__ = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.' )
        lowercase__ = xmod_layer.fc1.weight
        lowercase__ = xmod_layer.fc1.bias
        # output
        lowercase__ = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.' )
        lowercase__ = xmod_layer.fc2.weight
        lowercase__ = xmod_layer.fc2.bias
lowercase__ = xmod_layer.final_layer_norm.weight
lowercase__ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowercase__ = xmod_layer.adapter_layer_norm.weight
lowercase__ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowercase__ = bert_output.adapter_modules[lang_code]
lowercase__ = xmod_layer.adapter_modules[lang_code]
            lowercase__ = from_adapter.fc1.weight
            lowercase__ = from_adapter.fc1.bias
            lowercase__ = from_adapter.fc2.weight
            lowercase__ = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowercase__ = xmod_sent_encoder.layer_norm.weight
lowercase__ = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowercase__ = xmod.model.classification_heads['mnli'].dense.weight
lowercase__ = xmod.model.classification_heads['mnli'].dense.bias
lowercase__ = xmod.model.classification_heads['mnli'].out_proj.weight
lowercase__ = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowercase__ = xmod.model.encoder.lm_head.dense.weight
lowercase__ = xmod.model.encoder.lm_head.dense.bias
lowercase__ = xmod.model.encoder.lm_head.layer_norm.weight
lowercase__ = xmod.model.encoder.lm_head.layer_norm.bias
lowercase__ = xmod.model.encoder.lm_head.weight
lowercase__ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowercase__ = xmod.encode(A__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(A__ )
lowercase__ = model(A__ )[0]
if classification_head:
lowercase__ = xmod.model.classification_heads['mnli'](xmod.extract_features(A__ ) )
else:
lowercase__ = xmod.model(A__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowercase__ = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
lowercase__ = torch.allclose(A__ , A__ , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(A__ ).mkdir(parents=A__ , exist_ok=A__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
if __name__ == "__main__":
a__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
a__ : int = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
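# Example invocation (script name and paths are illustrative, not real checkpoints):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-converted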
| 642
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
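        # e.g. with pad_to_multiple_of=12 and a longest input of length 837:
        # 837 % 12 != 0, so expected_length = (837 // 12 + 1) * 12 = 70 * 12 = 840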
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1E-2)
@require_tf
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1E-2)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = min(lowerCAmelCase)
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
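# Direct `pad` usage outside this mixin (sketch; Wav2Vec2FeatureExtractor is one
# concrete feature extractor whose model_input_names[0] is "input_values"):
# from transformers import Wav2Vec2FeatureExtractor
# fe = Wav2Vec2FeatureExtractor()
# batch = fe.pad({"input_values": [[0.1] * 800, [0.2] * 1000]}, padding="longest", return_tensors="np")
# batch["input_values"].shape  # -> (2, 1000)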
| 642
| 1
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _lowerCAmelCase ( A__ ):
lowercase__ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _lowerCAmelCase ( A__ ):
lowercase__, lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(A__ , A__ , bias=A__ )
lowercase__ = emb.weight.data
return lin_layer
def _lowerCAmelCase ( A__ , A__=None ):
lowercase__ = {}
for old_key in state_dict.keys():
lowercase__ = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowercase__ = key.replace('moe_layer.experts.0' , F'''ffn.experts.expert_{expert_idx}''' )
else:
lowercase__ = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
lowercase__ = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
lowercase__ = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
lowercase__ = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
lowercase__ = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
lowercase__ = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
lowercase__ = key.replace('final_layer_norm' , 'ff_layer_norm' )
lowercase__ = state_dict[old_key]
return new_dict
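# Illustrative effect of the renaming above (made-up fairseq-style keys):
#   "layers.0.moe_layer.experts.0.fc1.weight" -> "layers.0.ffn.experts.expert_3.fc1.weight"  (expert_idx=3)
#   "layers.0.moe_layer.gate.wg.weight"       -> "layers.0.ffn.router.classifier.weight"
#   "layers.0.encoder_attn_layer_norm.weight" -> "layers.0.cross_attention_layer_norm.weight"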
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ = WEIGHTS_NAME ):
lowercase__ = []
lowercase__ = 0
os.makedirs(A__ , exist_ok=A__ )
for expert in range(A__ ):
lowercase__ = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(A__ ):
lowercase__ = torch.load(A__ )['model']
remove_ignore_keys_(A__ )
lowercase__ = rename_fairseq_keys(A__ , A__ )
lowercase__ = os.path.join(
A__ , weights_name.replace('.bin' , F'''-{len(A__ )+1:05d}-of-???.bin''' ) )
torch.save(A__ , A__ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(A__ )[0]].dtype )
# Add the last block
lowercase__ = os.path.join(A__ , weights_name.replace('.bin' , F'''-{len(A__ )+1:05d}-of-???.bin''' ) )
lowercase__ = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(A__ )
lowercase__ = rename_fairseq_keys(A__ , A__ )
lowercase__ = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(A__ ) == 1:
lowercase__ = os.path.join(A__ , A__ )
torch.save(A__ , A__ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(A__ , A__ )
# Otherwise, let's build the index
lowercase__ = {}
for idx, shard in enumerate(A__ ):
lowercase__ = weights_name.replace('.bin' , F'''-{idx+1:05d}-of-{len(A__ ):05d}.bin''' )
lowercase__ = os.path.join(A__ , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(A__ , os.path.join(A__ , A__ ) )
for key in shard:
lowercase__ = shard_file
# Add the metadata
lowercase__ = {'total_size': total_size}
lowercase__ = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(A__ , A__ ) , 'w' , encoding='utf-8' ) as f:
lowercase__ = json.dumps(A__ , indent=2 , sort_keys=A__ ) + '\n'
f.write(A__ )
return metadata, index
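# The generated index mirrors the standard sharded-checkpoint layout (sketch):
# {
#     "metadata": {"total_size": <total bytes across shards>},
#     "weight_map": {"<parameter name>": "pytorch_model-00001-of-000NN.bin", ...}
# }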
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
a__ : Optional[Any] = parser.parse_args()
a__ , a__ : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a__ : Tuple = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a__ : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 642
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowerCAmelCase ( A__ ):
    factors = prime_factors(A__ )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
return 0
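# Worked values for the Möbius function above: mu(1) = 1 (zero prime factors),
# mu(2) = -1, mu(6) = 1 (6 = 2 * 3, two distinct primes), mu(12) = 0 (2**2 divides 12).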
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
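# The same score can be computed directly with NLTK (sketch; token lists assumed):
# from nltk.translate import gleu_score
# gleu_score.corpus_gleu(list_of_references=[[ref_tokens]], hypotheses=[hyp_tokens], min_len=1, max_len=4)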
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
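# Usage sketch with the released class name (the class name in this dump was transformed):
# from transformers import FocalNetConfig
# config = FocalNetConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
# config.out_features  # derived from stage_names when out_features/out_indices are None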
| 642
| 1
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
lowercase__ = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_60_00,
'return_attention_mask': False,
'do_normalize': True,
}
lowercase__ = tempfile.mkdtemp()
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(self.tmpdirname , lowerCAmelCase)
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(lowerCAmelCase) + '\n')
with open(self.feature_extraction_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(lowerCAmelCase) + '\n')
# load decoder from hub
lowercase__ = 'hf-internal-testing/ngram-beam-search-decoder'
def UpperCAmelCase ( self : Tuple , **lowerCAmelCase : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.add_kwargs_tokens_map.copy()
kwargs.update(lowerCAmelCase)
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **lowerCAmelCase)
def UpperCAmelCase ( self : str , **lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_decoder()
lowercase__ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase , decoder=lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowercase__ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer , lowerCAmelCase)
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase)
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels)
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder())
processor.save_pretrained(self.tmpdirname)
        # load the processor with additional decoder kwargs and check that they are propagated
lowercase__ = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3)
# decoder
self.assertEqual(processor.language_model.alpha , 5.0)
self.assertEqual(processor.language_model.beta , 3.0)
self.assertEqual(processor.language_model.score_boundary , -7.0)
self.assertEqual(processor.language_model.unk_score_offset , 3)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'])
with self.assertRaisesRegex(lowerCAmelCase , 'include'):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder())
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_decoder()
lowercase__ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase , decoder=lowerCAmelCase)
lowercase__ = floats_list((3, 10_00))
lowercase__ = feature_extractor(lowerCAmelCase , return_tensors='np')
lowercase__ = processor(lowerCAmelCase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_decoder()
lowercase__ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase , decoder=lowerCAmelCase)
lowercase__ = 'This is a test string'
lowercase__ = processor(text=lowerCAmelCase)
lowercase__ = tokenizer(lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCAmelCase ( self : Any , lowerCAmelCase : Tuple=(2, 10, 16) , lowerCAmelCase : int=77) -> Optional[Any]:
"""simple docstring"""
np.random.seed(lowerCAmelCase)
return np.random.rand(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_decoder()
lowercase__ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase , decoder=lowerCAmelCase)
lowercase__ = self._get_dummy_logits(shape=(10, 16) , seed=13)
lowercase__ = processor.decode(lowerCAmelCase)
lowercase__ = decoder.decode_beams(lowerCAmelCase)[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text)
self.assertEqual('</s> <s> </s>' , decoded_processor.text)
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score)
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score)
@parameterized.expand([[None], ['fork'], ['spawn']])
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_decoder()
lowercase__ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase , decoder=lowerCAmelCase)
lowercase__ = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowercase__ = processor.batch_decode(lowerCAmelCase)
else:
with get_context(lowerCAmelCase).Pool() as pool:
lowercase__ = processor.batch_decode(lowerCAmelCase , lowerCAmelCase)
lowercase__ = list(lowerCAmelCase)
with get_context('fork').Pool() as p:
lowercase__ = decoder.decode_beams_batch(lowerCAmelCase , lowerCAmelCase)
lowercase__, lowercase__, lowercase__ = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0])
logit_scores_decoder.append(beams[0][-2])
lm_scores_decoder.append(beams[0][-1])
self.assertListEqual(lowerCAmelCase , decoded_processor.text)
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text)
self.assertListEqual(lowerCAmelCase , decoded_processor.logit_score)
self.assertListEqual(lowerCAmelCase , decoded_processor.lm_score)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_decoder()
lowercase__ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase , decoder=lowerCAmelCase)
lowercase__ = self._get_dummy_logits()
lowercase__ = 15
lowercase__ = -20.0
lowercase__ = -4.0
lowercase__ = processor.batch_decode(
lowerCAmelCase , beam_width=lowerCAmelCase , beam_prune_logp=lowerCAmelCase , token_min_logp=lowerCAmelCase , )
lowercase__ = decoded_processor_out.text
lowercase__ = list(lowerCAmelCase)
with get_context('fork').Pool() as pool:
lowercase__ = decoder.decode_beams_batch(
lowerCAmelCase , lowerCAmelCase , beam_width=lowerCAmelCase , beam_prune_logp=lowerCAmelCase , token_min_logp=lowerCAmelCase , )
lowercase__ = [d[0][0] for d in decoded_decoder_out]
lowercase__ = [d[0][2] for d in decoded_decoder_out]
lowercase__ = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , lowerCAmelCase)
self.assertTrue(np.array_equal(lowerCAmelCase , decoded_processor_out.logit_score))
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , lowerCAmelCase , atol=1E-3))
self.assertTrue(np.array_equal(lowerCAmelCase , decoded_processor_out.lm_score))
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , lowerCAmelCase , atol=1E-3))
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_decoder()
lowercase__ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase , decoder=lowerCAmelCase)
lowercase__ = self._get_dummy_logits()
lowercase__ = 2.0
lowercase__ = 5.0
lowercase__ = -20.0
lowercase__ = True
lowercase__ = processor.batch_decode(
lowerCAmelCase , alpha=lowerCAmelCase , beta=lowerCAmelCase , unk_score_offset=lowerCAmelCase , lm_score_boundary=lowerCAmelCase , )
lowercase__ = decoded_processor_out.text
lowercase__ = list(lowerCAmelCase)
decoder.reset_params(
alpha=lowerCAmelCase , beta=lowerCAmelCase , unk_score_offset=lowerCAmelCase , lm_score_boundary=lowerCAmelCase , )
with get_context('fork').Pool() as pool:
lowercase__ = decoder.decode_beams_batch(
lowerCAmelCase , lowerCAmelCase , )
lowercase__ = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , lowerCAmelCase)
lowercase__ = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0)
self.assertEqual(lm_model.beta , 5.0)
self.assertEqual(lm_model.unk_score_offset , -20.0)
self.assertEqual(lm_model.score_boundary , lowerCAmelCase)
def UpperCAmelCase ( self : int) -> List[str]:
"""simple docstring"""
lowercase__ = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
lowercase__ = processor.decoder.model_container[processor.decoder._model_key]
lowercase__ = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
lowercase__ = os.listdir(lowerCAmelCase)
lowercase__ = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
lowercase__ = snapshot_download('hf-internal-testing/processor_with_lm')
lowercase__ = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase)
lowercase__ = processor.decoder.model_container[processor.decoder._model_key]
lowercase__ = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
lowercase__ = os.listdir(lowerCAmelCase)
lowercase__ = os.listdir(lowerCAmelCase)
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
lowercase__ = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm')
lowercase__ = floats_list((3, 10_00))
lowercase__ = processor_wavaveca(lowerCAmelCase , return_tensors='np')
lowercase__ = processor_auto(lowerCAmelCase , return_tensors='np')
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2)
lowercase__ = self._get_dummy_logits()
lowercase__ = processor_wavaveca.batch_decode(lowerCAmelCase)
lowercase__ = processor_auto.batch_decode(lowerCAmelCase)
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
lowercase__ = self.get_feature_extractor()
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_decoder()
lowercase__ = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase , feature_extractor=lowerCAmelCase , decoder=lowerCAmelCase)
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
lowercase__ = self._get_dummy_logits()[0]
lowercase__ = processor.decode(lowerCAmelCase , output_word_offsets=lowerCAmelCase)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()) , 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase))
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word')) , outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word') , ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset') , [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset') , [1, 3, 5])
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
lowercase__ = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
lowercase__ = self._get_dummy_logits()
lowercase__ = processor.batch_decode(lowerCAmelCase , output_word_offsets=lowerCAmelCase)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()) , 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase))
self.assertListEqual(
[' '.join(self.get_from_offsets(lowerCAmelCase , 'word')) for o in outputs['word_offsets']] , outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word') , ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset') , [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset') , [1, 3, 5])
@slow
@require_torch
@require_torchaudio
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
import torch
lowercase__ = load_dataset('common_voice' , 'en' , split='train' , streaming=lowerCAmelCase)
lowercase__ = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_60_00))
lowercase__ = iter(lowerCAmelCase)
lowercase__ = next(lowerCAmelCase)
lowercase__ = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
lowercase__ = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowercase__ = processor(sample['audio']['array'] , return_tensors='pt').input_values
with torch.no_grad():
lowercase__ = model(lowerCAmelCase).logits.cpu().numpy()
lowercase__ = processor.decode(logits[0] , output_word_offsets=lowerCAmelCase)
lowercase__ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowercase__ = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
lowercase__ = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(lowerCAmelCase , 'word')) , lowerCAmelCase)
self.assertEqual(' '.join(self.get_from_offsets(lowerCAmelCase , 'word')) , output.text)
# output times
lowercase__ = torch.tensor(self.get_from_offsets(lowerCAmelCase , 'start_time'))
lowercase__ = torch.tensor(self.get_from_offsets(lowerCAmelCase , 'end_time'))
# fmt: off
lowercase__ = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99])
lowercase__ = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94])
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=0.01))
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=0.01))
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(A__ )
return pairs
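# Illustrative example (added comment, not part of the original module): for the
# symbol tuple ('l', 'o', 'w', 'e', 'r') the helper above returns the adjacent
# bigrams {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}, i.e. the candidate BPE merges.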
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase)
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: lowerCAmelCase[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
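# Sketch of intended usage (assumed, for illustration; the class mirrors the
# original BlenderbotSmall tokenizer):
#   tok = UpperCAmelCase__('vocab.json', 'merges.txt')
#   tok.tokenize('sample text')  # BPE sub-words, continuations marked with '@@'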
import fire
from utils import calculate_rouge, save_json
def _lowerCAmelCase ( A__ , A__ , A__=None , **A__ ):
lowercase__ = [x.strip() for x in open(A__ ).readlines()]
lowercase__ = [x.strip() for x in open(A__ ).readlines()][: len(A__ )]
lowercase__ = calculate_rouge(A__ , A__ , **A__ )
if save_path is not None:
save_json(A__ , A__ , indent=A__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
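# Example invocation (assumed file names, illustration only):
#   python calculate_rouge_path.py predictions.txt references.txt --save_path rouge.json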
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = PhobertTokenizer
A : List[Any] = False
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = ['#version: 0.2', 'l à</w>']
lowercase__ = {'unk_token': '<unk>'}
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(lowerCAmelCase))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Any) -> Any:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase)
def UpperCAmelCase ( self : str , lowerCAmelCase : List[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = 'Tôi là VinAI Research'
lowercase__ = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
return input_text, output_text
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
lowercase__ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
lowercase__ = 'Tôi là VinAI Research'
lowercase__ = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
print(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokens + [tokenizer.unk_token]
lowercase__ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase) , lowerCAmelCase)
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = []
lowercase__ = set()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf')
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return len(self.elements) == 0
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(lowerCAmelCase)
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(lowerCAmelCase)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
self.set.remove(lowerCAmelCase)
return (priority, item)
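# Illustrative behaviour (added comment): the queue stores (priority, item) pairs,
# and put() updates an existing item's priority in place. For example,
#   q.put((0, 0), 5); q.put((0, 0), 2)
# leaves a single entry for item (0, 0) with priority 2, so q.minkey() == 2.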
def _lowerCAmelCase ( A__ , A__ ):
# euclidean distance
lowercase__ = np.array(A__ )
lowercase__ = np.array(A__ )
return np.linalg.norm(a - b )
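# Quick check (illustrative): consistent_heuristic((0, 0), (3, 4)) evaluates to 5.0,
# the Euclidean distance between the two grid points.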
def _lowerCAmelCase ( A__ , A__ ):
# integer division by time variable
return consistent_heuristic(A__ , A__ ) // t
def _lowerCAmelCase ( A__ , A__ ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ )
return ans
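# Illustrative note (assumed semantics): key() computes the weighted A* priority
# f(s) = g(s) + Wa * h_i(s). With g(s) = 3, Wa = 1 and the Manhattan heuristic
# between (0, 0) and (4, 4), h = 8, so the computed priority is 3 + 1 * 8 = 11.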
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = np.chararray((n, n) )
for i in range(A__ ):
for j in range(A__ ):
lowercase__ = '*'
for i in range(A__ ):
for j in range(A__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '#'
lowercase__ = '-'
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__), (lowercase__)) = x
# print(x)
lowercase__ = '-'
lowercase__ = back_pointer[x]
lowercase__ = '-'
for i in range(A__ ):
for j in range(A__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowercase__ = back_pointer[goal]
while x != start:
print(A__ , end=' ' )
lowercase__ = back_pointer[x]
print(A__ )
sys.exit()
def _lowerCAmelCase ( A__ ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
for itera in range(A__ ):
open_list[itera].remove_element(A__ )
# print("s", s)
# print("j", j)
((lowercase__), (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A__ )
lowercase__ = -1
lowercase__ = float('inf' )
if valid(A__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) )
if neighbours not in close_list_inad:
for var in range(1 , A__ ):
if key(A__ , A__ , A__ , A__ ) <= Wa * key(
A__ , 0 , A__ , A__ ):
open_list[j].put(
A__ , key(A__ , A__ , A__ , A__ ) )
def _lowerCAmelCase ( ):
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a__ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a__ : Any = make_common_ground()
a__ : Union[str, Any] = blocks_blk
# hyper parameters
a__ : List[Any] = 1
a__ : List[str] = 1
a__ : Optional[int] = 20
a__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a__ : Tuple = (0, 0)
a__ : str = (n - 1, n - 1)
a__ : Optional[Any] = 1
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {start: 0, goal: float('inf' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(A__ ):
open_list.append(PriorityQueue() )
open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , A__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__, lowercase__ = open_list[i].top_show()
visited.add(A__ )
expand_state(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_inad.append(A__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(A__ )
expand_state(
A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_anchor.append(A__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
from __future__ import annotations
import math
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ):
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(A__ ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , A__ , A__ , A__ ) , minimax(depth + 1 , node_index * 2 + 1 , A__ , A__ , A__ ) , )
return min(
minimax(depth + 1 , node_index * 2 , A__ , A__ , A__ ) , minimax(depth + 1 , node_index * 2 + 1 , A__ , A__ , A__ ) , )
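# Worked example (illustrative, assuming maximizing and minimizing levels alternate
# with depth): for leaf scores [3, 5, 2, 9] and height log2(4) = 2, the root value
# is max(min(3, 5), min(2, 9)) = max(3, 2) = 3.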
def _lowerCAmelCase ( ):
lowercase__ = [90, 23, 6, 33, 21, 65, 123, 34_423]
lowercase__ = math.log(len(A__ ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , A__ , A__ , A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import math
import sys
def _lowerCAmelCase ( A__ ):
lowercase__ = ''
try:
with open(A__ , 'rb' ) as binary_file:
lowercase__ = binary_file.read()
for dat in data:
lowercase__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
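# High-level intent (assumed, comment added for clarity): this is the decoding half
# of a Lempel-Ziv style scheme. Each recognised bit pattern emits its lexicon entry,
# the lexicon grows by one entry per match, and it is re-keyed whenever its size
# crosses a power of two so that code widths track the encoder.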
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
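# Example invocation (assumed argument roles; note that the pipeline above actually
# decompresses, despite the name used in the entry point):
#   python this_script.py compressed.bin restored.bin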
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[Any] = logging.get_logger(__name__)
a__ : Dict = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = "blip_text_model"
def __init__( self : int , lowerCAmelCase : Dict=3_05_24 , lowerCAmelCase : Dict=7_68 , lowerCAmelCase : Dict=7_68 , lowerCAmelCase : Union[str, Any]=30_72 , lowerCAmelCase : Any=7_68 , lowerCAmelCase : Dict=12 , lowerCAmelCase : int=8 , lowerCAmelCase : List[Any]=5_12 , lowerCAmelCase : str="gelu" , lowerCAmelCase : Union[str, Any]=1E-1_2 , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : int=0.02 , lowerCAmelCase : str=3_05_22 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : str=0 , lowerCAmelCase : Optional[Any]=1_02 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Dict=True , **lowerCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , sep_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = encoder_hidden_size
lowercase__ = intermediate_size
lowercase__ = projection_dim
lowercase__ = hidden_dropout_prob
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = max_position_embeddings
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
lowercase__ = initializer_range
lowercase__ = attention_probs_dropout_prob
lowercase__ = is_decoder
lowercase__ = use_cache
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , lowerCAmelCase : Union[str, os.PathLike] , **lowerCAmelCase : List[Any]) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase)
lowercase__, lowercase__ = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase)
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
lowercase__ = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Dict = "blip_vision_model"
def __init__( self : Any , lowerCAmelCase : Union[str, Any]=7_68 , lowerCAmelCase : int=30_72 , lowerCAmelCase : Optional[Any]=5_12 , lowerCAmelCase : List[Any]=12 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : Optional[Any]=3_84 , lowerCAmelCase : Any=16 , lowerCAmelCase : str="gelu" , lowerCAmelCase : int=1E-5 , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : List[str]=1E-1_0 , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = hidden_size
lowercase__ = intermediate_size
lowercase__ = projection_dim
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = patch_size
lowercase__ = image_size
lowercase__ = initializer_range
lowercase__ = attention_dropout
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
@classmethod
def UpperCAmelCase ( cls : str , lowerCAmelCase : Union[str, os.PathLike] , **lowerCAmelCase : Dict) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase)
lowercase__, lowercase__ = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
lowercase__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "blip"
A : str = True
def __init__( self : Optional[Any] , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=5_12 , lowerCAmelCase : Tuple=2.65_92 , lowerCAmelCase : List[Any]=2_56 , **lowerCAmelCase : str , ) -> int:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
if text_config is None:
lowercase__ = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
if vision_config is None:
lowercase__ = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
lowercase__ = BlipTextConfig(**lowerCAmelCase)
lowercase__ = BlipVisionConfig(**lowerCAmelCase)
lowercase__ = self.vision_config.hidden_size
lowercase__ = projection_dim
lowercase__ = logit_scale_init_value
lowercase__ = 1.0
lowercase__ = 0.02
lowercase__ = image_text_hidden_size
@classmethod
def UpperCAmelCase ( cls : Dict , lowerCAmelCase : BlipTextConfig , lowerCAmelCase : BlipVisionConfig , **lowerCAmelCase : Dict) -> str:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> Any:
"""simple docstring"""
lowercase__ = copy.deepcopy(self.__dict__)
lowercase__ = self.text_config.to_dict()
lowercase__ = self.vision_config.to_dict()
lowercase__ = self.__class__.model_type
return output
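# Illustrative note (added): the composite config nests a text and a vision
# sub-config; the to_dict() above re-expands both, so round-tripping through
# from_dict should reconstruct the same nested structure (assumed behaviour,
# mirroring the upstream BlipConfig).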
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
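# Illustrative behaviour: for a vocab.txt with one token per line, this returns the
# stripped tokens in file order (for the ESM checkpoints, special tokens followed by
# the amino-acid alphabet; assumed layout).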
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
lowercase__ = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
lowercase__ = model(lowerCAmelCase)['last_hidden_state']
lowercase__ = tf.TensorShape((1, 10, 7_68))
self.assertEqual(output.shape , lowerCAmelCase)
# compare the actual values for a slice.
lowercase__ = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    'google_bleu': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
from math import sqrt
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for i in range(1 , int(sqrt(A__ ) + 1 ) ):
if n % i == 0 and i != sqrt(A__ ):
total += i + n // i
elif i == sqrt(A__ ):
total += i
return total - n
def _lowerCAmelCase ( A__ = 10_000 ):
lowercase__ = sum(
i
for i in range(1 , A__ )
if sum_of_divisors(sum_of_divisors(A__ ) ) == i and sum_of_divisors(A__ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
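# Worked example (illustrative): sum_of_divisors(220) = 284 and sum_of_divisors(284) = 220,
# so the amicable pair 220 and 284 passes the filter in solution(); perfect numbers are
# excluded by the sum_of_divisors(i) != i condition.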
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
| 1
|
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
a__ : List[Any] = getLogger(__name__)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ = 8 , A__ = 1_024 , A__="val" , A__=None , A__=False , A__="summarization" , A__=None , A__=1 , A__ = None , A__="" , **A__ , ):
lowercase__ = str(A__ )
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=A__ )
lowercase__ = Path(A__ )
lowercase__ = save_dir.joinpath(F'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(A__ )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(A__ ).cuda()
if fpaa:
lowercase__ = model.half()
# determine if we need to increase num_beams
use_task_specific_params(A__ , A__ ) # update config with task specific params
lowercase__ = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
lowercase__ = num_return_sequences
lowercase__ = AutoTokenizer.from_pretrained(A__ )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
lowercase__ = tokenizer.model_max_length
if prefix is None:
lowercase__ = prefix or getattr(model.config , 'prefix' , '' ) or ''
lowercase__ = SeqaSeqDataset(
A__ , A__ , A__ , max_target_length=1_024 , type_path=A__ , n_obs=A__ , prefix=A__ , **A__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
lowercase__ = ds.make_sortish_sampler(A__ , distributed=A__ , add_extra_examples=A__ , shuffle=A__ )
lowercase__ = DataLoader(A__ , sampler=A__ , batch_size=A__ , collate_fn=ds.collate_fn )
lowercase__ = []
for batch in tqdm(A__ ):
lowercase__ = model.generate(
input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=A__ , num_beams=A__ , **A__ , )
lowercase__ = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
lowercase__ = batch['ids']
if num_return_sequences > 1:
lowercase__ = chunks(A__ , A__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(A__ ):
results.append({'pred': pred, 'id': ids[i].item()} )
save_json(A__ , A__ )
return results, sampler.num_replicas
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir' , type=A__ , help='like cnn_dm/test.source' )
parser.add_argument(
'--model_name' , type=A__ , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=A__ , help='where to save' , default='tmp_gen' )
parser.add_argument('--max_source_length' , type=A__ , default=A__ )
parser.add_argument(
        '--type_path' , type=A__ , default='test' , help='which subset to evaluate, typically train/val/test' )
parser.add_argument('--task' , type=A__ , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=A__ , default=8 , required=A__ , help='batch size' )
parser.add_argument(
'--local_rank' , type=A__ , default=-1 , required=A__ , help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs' , type=A__ , default=A__ , required=A__ , help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences' , type=A__ , default=1 , required=A__ , help='How many sequences to return' )
parser.add_argument(
'--sync_timeout' , type=A__ , default=600 , required=A__ , help='How long should master process wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=A__ , default=A__ , required=A__ )
parser.add_argument('--tgt_lang' , type=A__ , default=A__ , required=A__ )
parser.add_argument(
        '--prefix' , type=A__ , required=A__ , default=A__ , help='will be added to the beginning of src examples' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--debug' , action='store_true' )
lowercase__ = time.time()
lowercase__, lowercase__ = parser.parse_known_args()
lowercase__ = parse_numeric_n_bool_cl_kwargs(A__ )
if generate_kwargs and args.local_rank <= 0:
print(F'''parsed the following generate kwargs: {generate_kwargs}''' )
lowercase__ = Path(args.save_dir + '_tmp' )
Path(A__ ).mkdir(exist_ok=A__ ) # this handles locking.
lowercase__ = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
lowercase__ = {}
if args.src_lang is not None:
lowercase__ = args.src_lang
if args.tgt_lang is not None:
lowercase__ = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=A__ )
lowercase__, lowercase__ = eval_data_dir(
args.data_dir , A__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=A__ , **A__ , )
if args.local_rank <= 0:
lowercase__ = Path(args.save_dir )
save_dir.mkdir(exist_ok=A__ )
lowercase__ = gather_results_from_each_node(A__ , A__ , args.sync_timeout )
lowercase__ = combine_partial_results(A__ )
if args.num_return_sequences > 1:
lowercase__ = save_dir.joinpath('pseudolabel_results.json' )
print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(A__ , A__ )
return
lowercase__ = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(A__ ) as f:
lowercase__ = [x.rstrip() for x in f.readlines()][: len(A__ )]
# Calculate metrics, save metrics, and save _generations.txt
lowercase__ = 'translation' in args.task
lowercase__ = calculate_bleu if calc_bleu else calculate_rouge
lowercase__ = 'bleu' if calc_bleu else 'rouge'
lowercase__ = score_fn(A__ , A__ )
lowercase__ = len(A__ )
lowercase__ = time.time() - start_time
lowercase__ = round(runtime / metrics['n_obs'] , 4 )
lowercase__ = num_replicas
# TODO(@stas00): add whatever metadata to metrics
lowercase__ = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''' )
save_json(A__ , A__ , indent=A__ )
print(A__ )
write_txt_file(A__ , save_dir.joinpath(F'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(A__ , save_dir.joinpath(F'''{args.type_path}.target''' ) )
else:
shutil.rmtree(A__ )
def _lowerCAmelCase ( A__ ):
lowercase__ = []
for partial_result in partial_results:
records.extend(A__ )
    lowercase__ = sorted(A__ , key=lambda A__ : A__["id"] )
lowercase__ = [x['pred'] for x in records]
return preds
def _lowerCAmelCase ( A__ , A__ , A__ ):
# WAIT FOR lots of .json files
lowercase__ = time.time()
logger.info('waiting for all nodes to finish' )
lowercase__ = None
while (time.time() - start_wait) < timeout:
lowercase__ = list(save_dir.glob('rank_*.json' ) )
if len(A__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
lowercase__ = lmap(A__ , A__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
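    # Illustrative invocation (model and data names are placeholders; flags match the
    # parser above):
    #   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
    #       --model_name Helsinki-NLP/opus-mt-en-ro --data_dir wmt_en_ro --save_dir tmp_gen --bs 16 --fp16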
run_generate()
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
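# Worked example (illustrative): for a principal of 100_000 at a 10% annual rate
# over 2 years, the monthly rate is 0.10 / 12 with 24 payments, so the amortization
# formula above yields an EMI of roughly 4_614.49.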
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCAmelCase ( A__ ):
if not is_accelerate_available():
return method
lowercase__ = version.parse(accelerate.__version__ ).base_version
if version.parse(A__ ) < version.parse('0.17.0' ):
return method
def wrapper(self , *A__ , **A__ ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *A__ , **A__ )
return wrapper
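# Hedged usage sketch: the decorator above is meant for model methods so that, on
# accelerate >= 0.17.0, a registered offload hook's pre_forward can move weights to
# the execution device before the call. Names below are hypothetical:
#
#     class MyVae:
#         @_lowerCAmelCase
#         def encode(self, x):
#             ...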
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
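# Illustrative restatement (a self-contained sketch of the recursion above, showing
# the Bezout identity it computes):
def _demo_extended_euclid(a: int, b: int) -> tuple[int, int]:
    # Returns (x, y) such that a * x + b * y == gcd(a, b), mirroring the logic above.
    if b == 0:
        return (1, 0)
    x, y = _demo_extended_euclid(b, a % b)
    return (y, x - (a // b) * y)

assert _demo_extended_euclid(10, 6) == (-1, 2)  # 10 * -1 + 6 * 2 == 2 == gcd(10, 6)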
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
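# Worked example (assuming the classic (n1, r1, n2, r2) argument order): solving
# x = 1 (mod 5) together with x = 3 (mod 7) yields 31, since 31 % 5 == 1 and 31 % 7 == 3.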
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def _lowerCAmelCase ( A__ ):
lowercase__ = {}
lowercase__ = job['started_at']
lowercase__ = job['completed_at']
lowercase__ = date_parser.parse(A__ )
lowercase__ = date_parser.parse(A__ )
lowercase__ = round((end_datetime - start_datetime).total_seconds() / 60.0 )
lowercase__ = start
lowercase__ = end
lowercase__ = duration_in_min
return job_info
def _lowerCAmelCase ( A__ , A__=None ):
lowercase__ = None
if token is not None:
lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
lowercase__ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
lowercase__ = requests.get(A__ , headers=A__ ).json()
lowercase__ = {}
try:
job_time.update({job['name']: extract_time_from_single_job(A__ ) for job in result['jobs']} )
lowercase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(A__ ):
lowercase__ = requests.get(url + F'''&page={i + 2}''' , headers=A__ ).json()
job_time.update({job['name']: extract_time_from_single_job(A__ ) for job in result['jobs']} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
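# Note: the jobs endpoint returns at most 100 entries per page, so after the first
# request the loop above fetches the remaining ceil((total_count - 100) / 100) pages.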
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
a__ : Any = parser.parse_args()
a__ : str = get_job_time(args.workflow_run_id)
a__ : Dict = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
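        # Example: feed_forward_proj="gated-gelu" splits into ["gated", "gelu"],
        # enables the gated dense variant, and is remapped to the "gelu_new" kernel here.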
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642
| 1
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _lowerCAmelCase ( A__ , A__=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def _lowerCAmelCase ( A__ , A__=0 ):
lowercase__ = []
for old_item in old_list:
lowercase__ = old_item.replace('in_layers.0' , 'norm1' )
lowercase__ = new_item.replace('in_layers.2' , 'conv1' )
lowercase__ = new_item.replace('out_layers.0' , 'norm2' )
lowercase__ = new_item.replace('out_layers.3' , 'conv2' )
lowercase__ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
lowercase__ = new_item.replace('skip_connection' , 'conv_shortcut' )
lowercase__ = shave_segments(A__ , n_shave_prefix_segments=A__ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
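# Illustrative example: with the default n_shave_prefix_segments=0, the key
# 'input_blocks.1.0.in_layers.0.weight' maps to 'input_blocks.1.0.norm1.weight'.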
def _lowerCAmelCase ( A__ , A__=0 ):
lowercase__ = []
for old_item in old_list:
lowercase__ = old_item
lowercase__ = new_item.replace('norm.weight' , 'group_norm.weight' )
lowercase__ = new_item.replace('norm.bias' , 'group_norm.bias' )
lowercase__ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
lowercase__ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
lowercase__ = shave_segments(A__ , n_shave_prefix_segments=A__ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def _lowerCAmelCase ( A__ , A__ , A__ , A__=None , A__=None , A__=None ):
assert isinstance(A__ , A__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowercase__ = old_checkpoint[path]
lowercase__ = old_tensor.shape[0] // 3
lowercase__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowercase__ = old_tensor.shape[0] // config['num_head_channels'] // 3
lowercase__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowercase__, lowercase__, lowercase__ = old_tensor.split(channels // num_heads , dim=1 )
lowercase__ = query.reshape(A__ )
lowercase__ = key.reshape(A__ )
lowercase__ = value.reshape(A__ )
for path in paths:
lowercase__ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowercase__ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
lowercase__ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
lowercase__ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
lowercase__ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowercase__ = old_checkpoint[path['old']][:, :, 0]
else:
lowercase__ = old_checkpoint[path['old']]
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = {}
lowercase__ = checkpoint['time_embed.0.weight']
lowercase__ = checkpoint['time_embed.0.bias']
lowercase__ = checkpoint['time_embed.2.weight']
lowercase__ = checkpoint['time_embed.2.bias']
lowercase__ = checkpoint['input_blocks.0.0.weight']
lowercase__ = checkpoint['input_blocks.0.0.bias']
lowercase__ = checkpoint['out.0.weight']
lowercase__ = checkpoint['out.0.bias']
lowercase__ = checkpoint['out.2.weight']
lowercase__ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
lowercase__ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
lowercase__ = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(A__ )
}
# Retrieves the keys for the middle blocks only
lowercase__ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
lowercase__ = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(A__ )
}
# Retrieves the keys for the output blocks only
lowercase__ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
lowercase__ = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(A__ )
}
for i in range(1 , A__ ):
lowercase__ = (i - 1) // (config['num_res_blocks'] + 1)
lowercase__ = (i - 1) % (config['num_res_blocks'] + 1)
lowercase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
lowercase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowercase__ = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
lowercase__ = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
lowercase__ = renew_resnet_paths(A__ )
lowercase__ = {'old': F'''input_blocks.{i}.0''', 'new': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
lowercase__ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
A__ , A__ , A__ , additional_replacements=[meta_path, resnet_op] , config=A__ )
if len(A__ ):
lowercase__ = renew_attention_paths(A__ )
lowercase__ = {
'old': F'''input_blocks.{i}.1''',
'new': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase__ = {
F'''input_blocks.{i}.1.qkv.bias''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
A__ , A__ , A__ , additional_replacements=[meta_path] , attention_paths_to_split=A__ , config=A__ , )
lowercase__ = middle_blocks[0]
lowercase__ = middle_blocks[1]
lowercase__ = middle_blocks[2]
lowercase__ = renew_resnet_paths(A__ )
assign_to_checkpoint(A__ , A__ , A__ , config=A__ )
lowercase__ = renew_resnet_paths(A__ )
assign_to_checkpoint(A__ , A__ , A__ , config=A__ )
lowercase__ = renew_attention_paths(A__ )
lowercase__ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
A__ , A__ , A__ , attention_paths_to_split=A__ , config=A__ )
for i in range(A__ ):
lowercase__ = i // (config['num_res_blocks'] + 1)
lowercase__ = i % (config['num_res_blocks'] + 1)
lowercase__ = [shave_segments(A__ , 2 ) for name in output_blocks[i]]
lowercase__ = {}
for layer in output_block_layers:
lowercase__, lowercase__ = layer.split('.' )[0], shave_segments(A__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A__ )
else:
lowercase__ = [layer_name]
if len(A__ ) > 1:
lowercase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
lowercase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
lowercase__ = renew_resnet_paths(A__ )
lowercase__ = renew_resnet_paths(A__ )
lowercase__ = {'old': F'''output_blocks.{i}.0''', 'new': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(A__ , A__ , A__ , additional_replacements=[meta_path] , config=A__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase__ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
lowercase__ = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
lowercase__ = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(A__ ) == 2:
lowercase__ = []
if len(A__ ):
lowercase__ = renew_attention_paths(A__ )
lowercase__ = {
'old': F'''output_blocks.{i}.1''',
'new': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase__ = {
F'''output_blocks.{i}.1.qkv.bias''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
A__ , A__ , A__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=A__ , )
else:
lowercase__ = renew_resnet_paths(A__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase__ = '.'.join(['output_blocks', str(A__ ), path['old']] )
lowercase__ = '.'.join(['up_blocks', str(A__ ), 'resnets', str(A__ ), path['new']] )
lowercase__ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
a__ : List[str] = parser.parse_args()
a__ : Union[str, Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
a__ : Optional[Any] = json.loads(f.read())
a__ : List[str] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
a__ : Union[str, Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
a__ : Any = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
a__ : Optional[int] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
a__ : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
| 1
|
import argparse
from collections import defaultdict
import yaml
a__ : Optional[Any] = "docs/source/en/_toctree.yml"
def _lowerCAmelCase ( A__ ):
lowercase__ = defaultdict(A__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase__ = [key for key, value in counts.items() if value > 1]
lowercase__ = []
for duplicate_key in duplicates:
lowercase__ = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(A__ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(A__ , key=lambda A__ : A__["title"].lower() )
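# Illustrative example: two entries sharing local="model_doc/bert" and an identical
# title collapse into one; the same key with differing titles raises the ValueError above.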
def _lowerCAmelCase ( A__=False ):
with open(A__ , encoding='utf-8' ) as f:
lowercase__ = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__ = content[api_idx]['sections']
# Then to the model doc
lowercase__ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowercase__ = api_doc[model_idx]['sections']
lowercase__ = [(idx, section) for idx, section in enumerate(A__ ) if 'sections' in section]
lowercase__ = False
for idx, modality_doc in modalities_docs:
lowercase__ = modality_doc['sections']
lowercase__ = clean_model_doc_toc(A__ )
if old_modality_doc != new_modality_doc:
lowercase__ = True
if overwrite:
lowercase__ = new_modality_doc
if diff:
if overwrite:
lowercase__ = model_doc
lowercase__ = api_doc
with open(A__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(A__ , allow_unicode=A__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a__ : Any = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
        lowercase__ = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
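    # Example: this is a 32-bit left rotate, e.g. rotate(0x80000000, 1) == 0x1.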
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
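            # The 80 rounds run in four 20-round stages, each with its own boolean
            # function f and round constant k, per the SHA-1 specification (FIPS 180-1).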
for i in range(0 , 80):
                if 0 <= i < 20:
                    lowercase__ = (b & c) | ((~b) & d)
                    lowercase__ = 0x5A827999
                elif 20 <= i < 40:
                    lowercase__ = b ^ c ^ d
                    lowercase__ = 0x6ED9EBA1
                elif 40 <= i < 60:
                    lowercase__ = (b & c) | (b & d) | (c & d)
                    lowercase__ = 0x8F1BBCDC
                elif 60 <= i < 80:
                    lowercase__ = b ^ c ^ d
                    lowercase__ = 0xCA62C1D6
                lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
                    self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(lowerCAmelCase , 30),
                    c,
                    d,
                )
            lowercase__ = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 642
| 1
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ : List[str] = logging.get_logger(__name__)
a__ : int = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : List[str] = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
a__ : Dict = {"facebook/blenderbot-3B": 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _lowerCAmelCase ( ):
lowercase__ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowercase__ = bs[:]
lowercase__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
lowercase__ = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
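# Illustrative consequence: bytes outside the printable ranges are remapped to
# 256 + counter, so the space byte 0x20 maps to chr(288), i.e. 'Ġ', the familiar
# GPT-2 word-boundary marker.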
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
return pairs
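# Illustrative example: applied to the symbol tuple ('h', 'e', 'l', 'l', 'o'), the
# helper above returns {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}.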
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = VOCAB_FILES_NAMES
A : Tuple = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str]="replace" , lowerCAmelCase : List[Any]="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Tuple="</s>" , lowerCAmelCase : Tuple="<s>" , lowerCAmelCase : List[Any]="<unk>" , lowerCAmelCase : Optional[Any]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Any=False , **lowerCAmelCase : List[str] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else bos_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else eos_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else sep_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else cls_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else unk_token
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else mask_token
super().__init__(
errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , )
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
lowercase__ = errors # how to handle errors in decoding
lowercase__ = bytes_to_unicode()
lowercase__ = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in bpe_merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
lowercase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
return token
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
lowercase__ = j
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = ' '.join(lowerCAmelCase)
lowercase__ = word
return word
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> Optional[Any]:
"""simple docstring"""
lowercase__ = []
for token in re.findall(self.pat , lowerCAmelCase):
lowercase__ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase).split(' '))
return bpe_tokens
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]) -> Dict:
"""simple docstring"""
lowercase__ = ''.join(lowerCAmelCase)
lowercase__ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: lowerCAmelCase[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase)) + [1]
return [1] + ([0] * len(lowerCAmelCase)) + [1, 1] + ([0] * len(lowerCAmelCase)) + [1]
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=False , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
lowercase__ = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase) > 0 and not text[0].isspace()):
lowercase__ = ' ' + text
return (text, kwargs)
def UpperCAmelCase ( self : str , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
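        # Note: unlike RoBERTa-style tokenizers, Blenderbot adds no BOS prefix here and
        # only appends EOS, so a single sequence becomes `tokens + [eos]`.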
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : "Conversation") -> List[int]:
"""simple docstring"""
lowercase__ = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done inside Blenderbot
inputs.append(' ' + text)
else:
                # Generated responses already contain the leading space.
inputs.append(lowerCAmelCase)
lowercase__ = ' '.join(lowerCAmelCase)
lowercase__ = self.encode(lowerCAmelCase)
if len(lowerCAmelCase) > self.model_max_length:
lowercase__ = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''')
return input_ids
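# Illustrative note (not part of the original file): for a conversation whose
# latest user turn is "hello", _build_conversation_input_ids encodes " hello"
# and, if the result exceeds model_max_length, keeps only the most recent
# tokens by slicing from the left.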
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BART tokenizer (backed by the HuggingFace *tokenizers* library)."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """Mask token; logs an error and returns None if it has not been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
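# Minimal usage sketch (hedged; checkpoint name taken from the map above, token
# layout follows build_inputs_with_special_tokens):
# tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
# tok("Hello world")["input_ids"]  # -> [bos_token_id, ..., eos_token_id]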
| 642
| 1
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Computes the fixed monthly loan payment (EMI):
    EMI = p * r * (1 + r)**n / ((1 + r)**n - 1), with monthly rate r and n payments.
    """
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
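# Worked example (computed directly from the formula above; inputs are
# illustrative): a 25,000 loan at 12% per annum over 3 years has a monthly rate
# of 0.01 and 36 payments, so equated_monthly_installments(25_000, 0.12, 3)
# comes out to roughly 830.36 per month.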
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta_start so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta_start so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
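# Note: the numeric targets in the asserts above are regression values; they
# assume the deterministic dummy_model/dummy_sample_deter fixtures provided by
# SchedulerCommonTest, so they are only meaningful with those exact inputs.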
| 642
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a ChineseCLIP image processor and a BERT tokenizer into one processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenizes `text` and/or preprocesses `images`, returning a combined BatchEncoding."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class
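# Usage sketch (the checkpoint name is illustrative, not taken from this file):
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")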
| 642
|
import cva
import numpy as np
class HarrisCorner:
    """Harris corner detector: responds where the local structure tensor has two large eigenvalues."""

    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Harris response: R = det(M) - k * trace(M)^2
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
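# Note: the fixed 0.5 response threshold above is a simple hard-coded choice;
# detectors commonly threshold relative to the maximum response instead
# (e.g. r > 0.01 * max_response), which adapts to image contrast.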
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 642
| 1
|
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    # Some local names below are reconstructions chosen to match how they are
    # used later in the body; only the control flow is taken from the original.
    _id = f'''{file}_{class_name}_{test_name}'''
    done_test[_id] += 1
    with open(file, 'r') as f:
        lines = f.readlines()
    class_regex = f'''class {class_name}('''
    test_regex = f'''{4 * ' '}def {test_name}('''
    line_begin_regex = f'''{8 * ' '}{correct_line.split()[0]}'''
    another_line_begin_regex = f'''{16 * ' '}{correct_line.split()[0]}'''
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f'''{spaces * ' '}{correct_line}''')
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, 'w') as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, 'r') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, 'r') as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
a__ : Dict = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    """Configuration class for Speech2Text encoder-decoder models."""
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
                f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
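# Consistency sketch (hedged; values are illustrative): the constructor
# enforces one kernel size per convolutional layer, e.g.
# config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))  # ok
# Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))           # raises ValueError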
| 642
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 700
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 0
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 701
|
# Imports
import numpy as np
class IndexCalculation:
    """Computes vegetation indices from reflectance bands (red, green, blue, red-edge, NIR)."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatches to the index implementation named by `index`."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            'ARVI2': self.arvi2,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
    def arvi2(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 642
| 0
|
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    'n_samples': 64,
    'horizon': 32,
    'num_inference_steps': 20,
    'n_guide_steps': 2,  # can set to 0 for faster sampling, does not use value network
    'scale_grad_by_std': True,
    'scale': 0.1,
    'eta': 0.0,
    't_grad_cutoff': 2,
    'device': 'cpu',
}
if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
                f''' {total_score}'''
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass
    print(f'''Total reward: {total_reward}''')
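# Note (assumption, not confirmed by this file): the `config` dict above mirrors
# the planner's sampling hyper-parameters; in a fuller version of this script it
# would be forwarded to the policy call, e.g. pipeline(obs, planning_horizon=32, **config).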
| 702
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self) -> None:
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification', remote=True)

    def test_exact_match_arg(self):
        result = self.tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg(self):
        result = self.tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')
| 0
|
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Finds the shortest path between `start` and `goal` using breadth-first search."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Finds the shortest path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
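# Complexity note: both helpers perform a standard breadth-first search, so each
# runs in O(V + E) time over the adjacency-list graph defined above; using
# collections.deque instead of list.pop(0) would avoid the O(n) dequeue cost.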
| 703
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, 'feature_size'))
        self.assertTrue(hasattr(feat_extract, 'sampling_rate'))
        self.assertTrue(hasattr(feat_extract, 'padding_value'))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='np')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='pt')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='tf')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    def _check_padding(self, numpify=False):
"""simple docstring"""
        def _inputs_have_equal_length(input_list):
            length = len(input_list[0])
            for input_slice in input_list[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
    def _check_truncation(self, numpify=False):
"""simple docstring"""
        def _inputs_have_equal_length(input_list):
            length = len(input_list[0])
            for input_slice in input_list[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding='longest', return_tensors='np')[input_name]
        input_pt = feat_extract.pad(processed_features, padding='longest', return_tensors='pt')[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding='longest', return_tensors='np')[input_name]
        input_tf = feat_extract.pad(processed_features, padding='longest', return_tensors='tf')[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        processed = feat_extract.pad(processed, padding='longest', return_tensors='np')
        self.assertIn('attention_mask', processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)
        processed_pad = feat_extract.pad(
            processed, padding='max_length', max_length=max_length, truncation=True, return_tensors='np')
        self.assertIn('attention_mask', processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
| 642
| 0
|
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        """Store the data and a reference to the next node."""
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        """Readable representation, e.g. Node(1)."""
        return f"Node({self.data})"


class LinkedList:
    def __init__(self) -> None:
        """Create an empty singly linked list."""
        self.head = None

    def __iter__(self) -> Any:
        """Yield every node's data, from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes in the list."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """String representation such as 1->2->3."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Indexing to get the data at ``index``."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Indexing to overwrite the data at ``index``."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` at position ``index``."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        """Delete and return the first node's data."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Delete and return the data at position ``index``."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        """True when the list has no nodes."""
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place."""
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev
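def _reverse_usage_sketch() -> None:
    # Added illustrative sketch (not part of the original file): reverse()
    # flips 1->2->3 into 3->2->1 in place.
    sketch_list = LinkedList()
    for value in (1, 2, 3):
        sketch_list.insert_tail(value)
    sketch_list.reverse()
    assert str(sketch_list) == "3->2->1"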
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section tests a mix of data types stored in the linked list.
    test_input = [
        -9,
        100,
        Node(77_345_112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')


if __name__ == "__main__":
    main()
| 704
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Mobius function of ``number``: 0, 1, or -1."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
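def _mobius_usage_sketch() -> None:
    # Added illustrative examples (for clarity, not from the original file):
    # mobius(4) == 0 since 4 = 2 * 2 is not square-free; mobius(6) == 1 since
    # 6 = 2 * 3 has an even number of distinct prime factors; mobius(2) == -1
    # for a single prime factor.
    assert mobius(4) == 0
    assert mobius(6) == 1
    assert mobius(2) == -1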
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = ["CLIPFeatureExtractor"]
a__ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
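# Added illustrative note (hedged, not from the original file): with the
# default depths [2, 2, 6, 2], the derived stage names are
# ["stem", "stage1", "stage2", "stage3", "stage4"], and out_features /
# out_indices are validated against exactly this list. A minimal sketch of
# the derivation:
def _stage_names_sketch(depths: list) -> list:
    return ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]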
| 642
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Any) -> Dict:
"""simple docstring"""
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@slow
def UpperCAmelCase ( self : List[Any]) -> Any:
"""simple docstring"""
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 706
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
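def _get_pairs_usage_sketch() -> None:
    # Added illustrative example (not part of the original file): the symbol
    # pairs of "hello", viewed as a tuple of characters, are its adjacent
    # bigrams.
    expected = {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
    assert get_pairs(('h', 'e', 'l', 'l', 'o')) == expected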
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub("(')", r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if '\n' in token:
            token = token.replace('\n', ' __newln__')

        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return ' '.join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []

        words = re.findall(r'\S+\n?', text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back into a single string."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary JSON and the BPE merges to ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
| 642
| 0
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared
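def _next_number_sketch() -> None:
    # Added illustrative example (not part of the original file): 85 maps to
    # 8**2 + 5**2 == 89, the entry point of the chain that never reaches 1.
    assert next_number(85) == 89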
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """True when the chain starting at ``number`` arrives at 1, False for 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
| 707
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 0
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
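    # Added arithmetic note (illustrative, using this tester's defaults:
    # image_size=32, patch_size=2, depths=[1, 2, 1], embed_dim=16): the
    # expected sequence length works out to ((32 // 2) ** 2) // (4 ** 2) == 16
    # and the expected hidden dimension to 16 * 2 ** 2 == 64.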
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
A : Any = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
A : Any = False
A : Dict = False
A : Optional[int] = False
A : Dict = False
A : List[Any] = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
))
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip('Swin does not use inputs_embeds')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking')
def UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions')
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone')
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints')
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=(
                            'Tuple and dict output are not equal. Difference:'
                            f' {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:'
                            f' {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has'
                            f' `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.'
                        ), )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
'''simple docstring'''
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 708
|
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf')

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
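def _priority_queue_sketch() -> None:
    # Added illustrative sketch (not part of the original file): re-putting an
    # item updates its priority in place instead of duplicating it.
    pq = PriorityQueue()
    pq.put((1, 1), 5)
    pq.put((1, 1), 2)
    assert pq.minkey() == 2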
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
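def _key_sketch() -> None:
    # Added illustrative example (not part of the original file): g(start) is
    # 0, so with W1 == 1 the anchor key of the start node equals its heuristic
    # distance to the goal. The dictionary below is hypothetical.
    sketch_g = {(0, 0): 0}
    assert key((0, 0), 0, (3, 4), sketch_g) == consistent_heuristic((0, 0), (3, 4))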
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = '*'

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '#'

    grid[0][(n - 1)] = '-'
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '-'
        x = back_pointer[x]
    grid[(n - 1)][0] = '-'

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=' ')
                print('<-- End position', end=' ')
            else:
                print(grid[i][j], end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')
    print('PATH TAKEN BY THE ALGORITHM IS:-')
    x = back_pointer[goal]
    while x != start:
        print(x, end=' ')
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('inf')

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float('inf')}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('inf'):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    _, get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                        close_list_anchor.append(get_s)
    print('No path found to goal')
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print('#', end=' ')
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print('*', end=' ')
                else:
                    print('-', end=' ')
            else:
                print('*', end=' ')
            if (j, i) == (n - 1, n - 1):
                print('<-- End position', end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 642
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a__ : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '<eod>')
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['▁he', 'll', 'o'])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase__ = {'input_ids': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
| 709
|
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Reads given file as bytes and returns them as a long string."""
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
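def _byte_to_bits_sketch() -> None:
    # Added illustrative example (not part of the original file): each byte is
    # rendered as a fixed-width 8-bit string, so byte value 65 ("A") becomes
    # "01000001".
    assert f'{65:08b}' == '01000001'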
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
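# --- Editor's hedged sketch (not part of the original script) ---
# A self-contained demonstration of the byte <-> bit-string conversion that
# read_file_binary and write_file_binary perform; the payload is made up.
def _bits_roundtrip_demo():
    payload = b'LZW'
    bits = ''.join(f'{byte:08b}' for byte in payload)  # bytes -> bit string
    assert bits == '010011000101101001010111'
    rebuilt = bytes(int(bits[i : i + 8], 2) for i in range(0, len(bits), 8))
    assert rebuilt == payload  # bit string -> bytes round-trips exactly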
| 642
| 0
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Dict = (PNDMScheduler,)
A : List[Any] = (("num_inference_steps", 50),)
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> List[Any]:
"""simple docstring"""
lowercase__ = {
"num_train_timesteps": 10_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**UpperCamelCase_)
return config
def UpperCAmelCase ( self : int , lowerCAmelCase : str=0 , **lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
lowercase__ = dict(self.forward_default_kwargs)
lowercase__ = kwargs.pop('num_inference_steps' , UpperCamelCase_)
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**UpperCamelCase_)
lowercase__ = scheduler_class(**UpperCamelCase_)
scheduler.set_timesteps(UpperCamelCase_)
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_)
lowercase__ = scheduler_class.from_pretrained(UpperCamelCase_)
new_scheduler.set_timesteps(UpperCamelCase_)
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_).prev_sample
lowercase__ = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_).prev_sample
lowercase__ = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Optional[Any]=0 , **lowerCAmelCase : Dict) -> str:
"""simple docstring"""
lowercase__ = dict(self.forward_default_kwargs)
lowercase__ = kwargs.pop('num_inference_steps' , UpperCamelCase_)
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**UpperCamelCase_)
scheduler.set_timesteps(UpperCamelCase_)
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_)
lowercase__ = scheduler_class.from_pretrained(UpperCamelCase_)
# set timesteps on the reloaded scheduler first
new_scheduler.set_timesteps(UpperCamelCase_)
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_).prev_sample
lowercase__ = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_).prev_sample
lowercase__ = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**UpperCamelCase_)
lowercase__ = scheduler_class(**UpperCamelCase_)
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase_)
for i, t in enumerate(scheduler.prk_timesteps):
lowercase__ = model(UpperCamelCase_ , UpperCamelCase_)
lowercase__ = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
lowercase__ = model(UpperCamelCase_ , UpperCamelCase_)
lowercase__ = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_).prev_sample
return sample
def UpperCAmelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = dict(self.forward_default_kwargs)
lowercase__ = kwargs.pop('num_inference_steps' , UpperCamelCase_)
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**UpperCamelCase_)
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase_ , 'set_timesteps'):
scheduler.set_timesteps(UpperCamelCase_)
elif num_inference_steps is not None and not hasattr(UpperCamelCase_ , 'set_timesteps'):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step_prk(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_).prev_sample
lowercase__ = scheduler.step_prk(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
lowercase__ = scheduler.step_plms(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_).prev_sample
lowercase__ = scheduler.step_plms(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_)
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase_)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**UpperCamelCase_)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02]):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_)
def UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCamelCase_)
def UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=UpperCamelCase_)
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = 27
for scheduler_class in self.scheduler_classes:
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**UpperCamelCase_)
scheduler.set_timesteps(UpperCamelCase_)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
lowercase__ = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_).prev_sample
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
with self.assertRaises(UpperCamelCase_):
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**UpperCamelCase_)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(UpperCamelCase_))
lowercase__ = torch.mean(torch.abs(UpperCamelCase_))
assert abs(result_sum.item() - 1_98.13_18) < 1E-2
assert abs(result_mean.item() - 0.25_80) < 1E-3
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(UpperCamelCase_))
lowercase__ = torch.mean(torch.abs(UpperCamelCase_))
assert abs(result_sum.item() - 67.39_86) < 1E-2
assert abs(result_mean.item() - 0.08_78) < 1E-3
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(UpperCamelCase_))
lowercase__ = torch.mean(torch.abs(UpperCamelCase_))
assert abs(result_sum.item() - 2_30.03_99) < 1E-2
assert abs(result_mean.item() - 0.29_95) < 1E-3
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(UpperCamelCase_))
lowercase__ = torch.mean(torch.abs(UpperCamelCase_))
assert abs(result_sum.item() - 1_86.94_82) < 1E-2
assert abs(result_mean.item() - 0.24_34) < 1E-3
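# --- Editor's hedged sketch (not part of the test suite) ---
# The config save/load round trip the tests above exercise, assuming a recent
# `diffusers` release (PNDMScheduler, save_config and from_pretrained are the
# public API; the hyperparameters mirror the scheduler config used above).
def _pndm_config_roundtrip_demo():
    import tempfile

    import torch
    from diffusers import PNDMScheduler

    scheduler = PNDMScheduler(
        num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02,
        beta_schedule='linear')
    scheduler.set_timesteps(10)
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        reloaded = PNDMScheduler.from_pretrained(tmpdirname)
    reloaded.set_timesteps(10)
    # An identical config must produce an identical timestep schedule.
    assert torch.equal(scheduler.timesteps, reloaded.timesteps)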
| 710
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
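# --- Editor's hedged sketch (not part of the tokenizer) ---
# The special-token layout that build_inputs_with_special_tokens above
# implements: <cls> seq_a <eos> for a single sequence, and
# <cls> seq_a <eos> seq_b <eos> for a pair (ESM has no sep token, so <eos>
# doubles as the separator). The ids below are made up for illustration.
def _esm_special_tokens_demo():
    cls_id, eos_id = 0, 2             # hypothetical ids for <cls> and <eos>
    seq_a, seq_b = [5, 6, 7], [8, 9]  # hypothetical token ids
    single = [cls_id] + seq_a + [eos_id]
    pair = [cls_id] + seq_a + [eos_id] + seq_b + [eos_id]
    assert single == [0, 5, 6, 7, 2]
    assert pair == [0, 5, 6, 7, 2, 8, 9, 2]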
| 642
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = 42
A : Optional[int] = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
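# --- Editor's hedged sketch (not part of the original module) ---
# The two placeholder fields in the dataclass above correspond, as far as the
# editor can tell from the released diffusers source, to an image batch plus
# an optional per-image NSFW flag; a minimal BaseOutput of that shape:
@dataclass
class _SketchSemanticPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]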
| 711
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
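# --- Editor's hedged usage sketch (not part of the metric) ---
# The metric is a thin wrapper around NLTK's corpus_gleu, already imported
# above; the sentences here are made up.
def _google_bleu_demo():
    hypotheses = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
    list_of_references = [[['the', 'cat', 'is', 'on', 'the', 'mat']]]
    score = gleu_score.corpus_gleu(
        list_of_references=list_of_references, hypotheses=hypotheses,
        min_len=1, max_len=4)
    assert 0.0 <= score <= 1.0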
| 642
| 0